Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/Makefile | 2
-rw-r--r--  drivers/gpu/ion/Kconfig | 14
-rw-r--r--  drivers/gpu/ion/ion_system_mapper.c | 114
-rw-r--r--  drivers/staging/android/Kconfig | 2
-rw-r--r--  drivers/staging/android/Makefile | 2
-rw-r--r--  drivers/staging/android/android_alarm.h | 44
-rw-r--r--  drivers/staging/android/ashmem.h | 30
-rw-r--r--  drivers/staging/android/binder.h | 306
-rw-r--r--  drivers/staging/android/ion/Kconfig | 24
-rw-r--r--  drivers/staging/android/ion/Makefile (renamed from drivers/gpu/ion/Makefile) | 1
-rw-r--r--  drivers/staging/android/ion/compat_ion.c (renamed from drivers/gpu/ion/compat_ion.c) | 27
-rw-r--r--  drivers/staging/android/ion/compat_ion.h (renamed from drivers/gpu/ion/compat_ion.h) | 0
-rw-r--r--  drivers/staging/android/ion/ion.c (renamed from drivers/gpu/ion/ion.c) | 302
-rw-r--r--  drivers/staging/android/ion/ion.h | 204
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c (renamed from drivers/gpu/ion/ion_carveout_heap.c) | 121
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c (renamed from drivers/gpu/ion/ion_chunk_heap.c) | 74
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c (renamed from drivers/gpu/ion/ion_cma_heap.c) | 37
-rw-r--r--  drivers/staging/android/ion/ion_heap.c (renamed from drivers/gpu/ion/ion_heap.c) | 112
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c (renamed from drivers/gpu/ion/ion_page_pool.c) | 19
-rw-r--r--  drivers/staging/android/ion/ion_priv.h (renamed from drivers/gpu/ion/ion_priv.h) | 31
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c (renamed from drivers/gpu/ion/ion_system_heap.c) | 143
-rw-r--r--  drivers/staging/android/ion/ion_test.c | 281
-rw-r--r--  drivers/staging/android/ion/tegra/Makefile (renamed from drivers/gpu/ion/tegra/Makefile) | 0
-rw-r--r--  drivers/staging/android/ion/tegra/tegra_ion.c (renamed from drivers/gpu/ion/tegra/tegra_ion.c) | 2
-rw-r--r--  drivers/staging/android/sw_sync.h | 19
-rw-r--r--  drivers/staging/android/sync.h | 86
-rw-r--r--  drivers/staging/android/uapi/android_alarm.h | 62
-rw-r--r--  drivers/staging/android/uapi/ashmem.h | 47
-rw-r--r--  drivers/staging/android/uapi/binder.h | 330
-rw-r--r--  drivers/staging/android/uapi/ion.h | 196
-rw-r--r--  drivers/staging/android/uapi/ion_test.h | 71
-rw-r--r--  drivers/staging/android/uapi/sw_sync.h | 32
-rw-r--r--  drivers/staging/android/uapi/sync.h | 97
-rw-r--r--  drivers/usb/gadget/f_mtp.c | 11
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/adf/adf.c | 28
-rw-r--r--  drivers/video/adf/adf_client.c | 3
-rw-r--r--  drivers/video/adf/adf_fbdev.c | 4
-rw-r--r--  drivers/video/adf/adf_sysfs.c | 16
39 files changed, 1858 insertions(+), 1038 deletions(-)
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index a2efb62aa77..d8a22c2a579 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1,2 +1,2 @@
-obj-y += drm/ vga/ ion/
+obj-y += drm/ vga/
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
deleted file mode 100644
index c62f2cbb9e8..00000000000
--- a/drivers/gpu/ion/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-menuconfig ION
- tristate "Ion Memory Manager"
- depends on ARM
- select GENERIC_ALLOCATOR
- select DMA_SHARED_BUFFER
- help
- Chose this option to enable the ION Memory Manager.
-
-config ION_TEGRA
- tristate "Ion for Tegra"
- depends on ARCH_TEGRA && ION
- help
- Choose this option if you wish to use ion on an nVidia Tegra.
-
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
deleted file mode 100644
index 692458e07b5..00000000000
--- a/drivers/gpu/ion/ion_system_mapper.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * drivers/gpu/ion/ion_system_mapper.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/memory.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-/*
- * This mapper is valid for any heap that allocates memory that already has
- * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory,
- * pages obtained via io_remap, etc.
- */
-static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct ion_mapping **mapping)
-{
- if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
- pr_err("%s: attempting to map an unsupported heap\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- /* XXX REVISIT ME!!! */
- *((unsigned long *)mapping) = (unsigned long)buffer->priv;
- return buffer->priv;
-}
-
-static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct ion_mapping *mapping)
-{
- if (!((1 << buffer->heap->type) & mapper->heap_mask))
- pr_err("%s: attempting to unmap an unsupported heap\n",
- __func__);
-}
-
-static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct ion_mapping *mapping)
-{
- if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
- pr_err("%s: attempting to unmap an unsupported heap\n",
- __func__);
- return ERR_PTR(-EINVAL);
- }
- return buffer->priv;
-}
-
-static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct vm_area_struct *vma,
- struct ion_mapping *mapping)
-{
- int ret;
-
- switch (buffer->heap->type) {
- case ION_HEAP_KMALLOC:
- {
- unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
- ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- break;
- }
- case ION_HEAP_VMALLOC:
- ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
- break;
- default:
- pr_err("%s: attempting to map unsupported heap to userspace\n",
- __func__);
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct ion_mapper_ops ops = {
- .map = ion_kernel_mapper_map,
- .map_kernel = ion_kernel_mapper_map_kernel,
- .map_user = ion_kernel_mapper_map_user,
- .unmap = ion_kernel_mapper_unmap,
-};
-
-struct ion_mapper *ion_system_mapper_create(void)
-{
- struct ion_mapper *mapper;
- mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
- if (!mapper)
- return ERR_PTR(-ENOMEM);
- mapper->type = ION_SYSTEM_MAPPER;
- mapper->ops = &ops;
- mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
- return mapper;
-}
-
-void ion_system_mapper_destroy(struct ion_mapper *mapper)
-{
- kfree(mapper);
-}
-
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index fe11eb6fa96..6da535db253 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -108,6 +108,8 @@ config SW_SYNC_USER
*WARNING* improper use of this can result in deadlocking kernel
drivers from userspace.
+source "drivers/staging/android/ion/Kconfig"
+
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c136299e05a..0a01e191490 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,5 +1,7 @@
ccflags-y += -I$(src) # needed for trace events
+obj-y += ion/
+
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
index 4fd32f337f9..495b20cf3bf 100644
--- a/drivers/staging/android/android_alarm.h
+++ b/drivers/staging/android/android_alarm.h
@@ -16,50 +16,10 @@
#ifndef _LINUX_ANDROID_ALARM_H
#define _LINUX_ANDROID_ALARM_H
-#include <linux/ioctl.h>
-#include <linux/time.h>
#include <linux/compat.h>
+#include <linux/ioctl.h>
-enum android_alarm_type {
- /* return code bit numbers or set alarm arg */
- ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME,
-
- ANDROID_ALARM_TYPE_COUNT,
-
- /* return code bit numbers */
- /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
- ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
- ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT _IO('a', 1)
-
-#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
-
+#include "uapi/android_alarm.h"
#ifdef CONFIG_COMPAT
#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
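For context, the macros being moved into uapi/android_alarm.h are consumed from userspace roughly as follows. This is an illustrative sketch, not code from this patch; the /dev/alarm node, the include path, and the absence of error handling are all assumptions of the example:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <time.h>

#include "uapi/android_alarm.h"	/* assumed include path for this sketch */

/* Arm an RTC wakeup alarm; "when" is a time in the chosen clock's timebase. */
static int arm_rtc_wakeup(struct timespec *when)
{
	int fd = open("/dev/alarm", O_RDWR);

	if (fd < 0)
		return -1;
	return ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), when);
}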
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 8dc0f0d3adf..5abcfd7aa70 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -16,35 +16,7 @@
#include <linux/ioctl.h>
#include <linux/compat.h>
-#define ASHMEM_NAME_LEN 256
-
-#define ASHMEM_NAME_DEF "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED 0
-#define ASHMEM_WAS_PURGED 1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED 0
-#define ASHMEM_IS_PINNED 1
-
-struct ashmem_pin {
- __u32 offset; /* offset into region, in bytes, page-aligned */
- __u32 len; /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC 0x77
-
-#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
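As with the alarm header, only the location of the definitions changes, not the ABI. A minimal userspace sketch of the ashmem interface; the conventional /dev/ashmem node, the include path, and the elided error handling are assumptions of the example:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "uapi/ashmem.h"	/* assumed include path for this sketch */

static void *ashmem_map(size_t size)
{
	char name[ASHMEM_NAME_LEN] = "example-region";
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0)
		return NULL;
	ioctl(fd, ASHMEM_SET_NAME, name);	/* optional debugging name */
	ioctl(fd, ASHMEM_SET_SIZE, size);	/* must precede mmap() */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}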
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index dbe81ceca1b..d4101a67171 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -20,311 +20,7 @@
#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H
-#include <linux/ioctl.h>
-
-#define B_PACK_CHARS(c1, c2, c3, c4) \
- ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
- BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
- FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
- FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes. The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur. The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
- /* 8 bytes for large_flat_header. */
- unsigned long type;
- unsigned long flags;
-
- /* 8 bytes of data. */
- union {
- void __user *binder; /* local object */
- signed long handle; /* remote object */
- };
-
- /* extra data associated with local object */
- void __user *cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
- signed long write_size; /* bytes to write */
- signed long write_consumed; /* bytes consumed by driver */
- unsigned long write_buffer;
- signed long read_size; /* bytes to read */
- signed long read_consumed; /* bytes consumed by driver */
- unsigned long read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
- /* driver protocol version -- increment with incompatible change */
- signed long protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interupted. This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process. That is, the process is being destroyed.
- * You should handle this by exiting from your process. Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
- TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
- TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
- TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
- TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
- /* The first two are only used for bcTRANSACTION and brTRANSACTION,
- * identifying the target and contents of the transaction.
- */
- union {
- size_t handle; /* target descriptor of command transaction */
- void *ptr; /* target descriptor of return transaction */
- } target;
- void *cookie; /* target object cookie */
- unsigned int code; /* transaction command */
-
- /* General information about the transaction. */
- unsigned int flags;
- pid_t sender_pid;
- uid_t sender_euid;
- size_t data_size; /* number of bytes of data */
- size_t offsets_size; /* number of bytes of offsets */
-
- /* If this transaction is inline, the data immediately
- * follows here; otherwise, it ends with a pointer to
- * the data buffer.
- */
- union {
- struct {
- /* transaction data */
- const void __user *buffer;
- /* offsets from buffer to flat_binder_object structs */
- const void __user *offsets;
- } ptr;
- uint8_t buf[8];
- } data;
-};
-
-struct binder_ptr_cookie {
- void *ptr;
- void *cookie;
-};
-
-struct binder_pri_desc {
- int priority;
- int desc;
-};
-
-struct binder_pri_ptr_cookie {
- int priority;
- void *ptr;
- void *cookie;
-};
-
-enum binder_driver_return_protocol {
- BR_ERROR = _IOR('r', 0, int),
- /*
- * int: error code
- */
-
- BR_OK = _IO('r', 1),
- /* No parameters! */
-
- BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
- BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
- /*
- * binder_transaction_data: the received command.
- */
-
- BR_ACQUIRE_RESULT = _IOR('r', 4, int),
- /*
- * not currently supported
- * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
- * Else the remote object has acquired a primary reference.
- */
-
- BR_DEAD_REPLY = _IO('r', 5),
- /*
- * The target of the last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
- */
-
- BR_TRANSACTION_COMPLETE = _IO('r', 6),
- /*
- * No parameters... always refers to the last transaction requested
- * (including replies). Note that this will be sent even for
- * asynchronous transactions.
- */
-
- BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
- BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
- BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
- BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
- /*
- * not currently supported
- * int: priority
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_NOOP = _IO('r', 12),
- /*
- * No parameters. Do nothing and examine the next command. It exists
- * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
- */
-
- BR_SPAWN_LOOPER = _IO('r', 13),
- /*
- * No parameters. The driver has determined that a process has no
- * threads waiting to service incoming transactions. When a process
- * receives this command, it must spawn a new service thread and
- * register it via bcENTER_LOOPER.
- */
-
- BR_FINISHED = _IO('r', 14),
- /*
- * not currently supported
- * stop threadpool thread
- */
-
- BR_DEAD_BINDER = _IOR('r', 15, void *),
- /*
- * void *: cookie
- */
- BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
- /*
- * void *: cookie
- */
-
- BR_FAILED_REPLY = _IO('r', 17),
- /*
- * The the last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
- */
-};
-
-enum binder_driver_command_protocol {
- BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
- BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
- /*
- * binder_transaction_data: the sent command.
- */
-
- BC_ACQUIRE_RESULT = _IOW('c', 2, int),
- /*
- * not currently supported
- * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
- * Else you have acquired a primary reference on the object.
- */
-
- BC_FREE_BUFFER = _IOW('c', 3, int),
- /*
- * void *: ptr to transaction data received on a read
- */
-
- BC_INCREFS = _IOW('c', 4, int),
- BC_ACQUIRE = _IOW('c', 5, int),
- BC_RELEASE = _IOW('c', 6, int),
- BC_DECREFS = _IOW('c', 7, int),
- /*
- * int: descriptor
- */
-
- BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
- BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
- /*
- * not currently supported
- * int: priority
- * int: descriptor
- */
-
- BC_REGISTER_LOOPER = _IO('c', 11),
- /*
- * No parameters.
- * Register a spawned looper thread with the device.
- */
-
- BC_ENTER_LOOPER = _IO('c', 12),
- BC_EXIT_LOOPER = _IO('c', 13),
- /*
- * No parameters.
- * These two commands are sent as an application-level thread
- * enters and exits the binder loop, respectively. They are
- * used so the binder can have an accurate count of the number
- * of looping threads it has available.
- */
-
- BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
- /*
- * void *: cookie
- */
-};
+#include "uapi/binder.h"
#endif /* _LINUX_BINDER_H */
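The NOTE above about EINTR and ECONNREFUSED describes the driver's calling convention; a hedged sketch of the simplest exchange, the BINDER_VERSION handshake (the /dev/binder node and the include path are assumptions of the example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

#include "uapi/binder.h"	/* assumed include path for this sketch */

int main(void)
{
	struct binder_version vers = { 0 };
	int fd = open("/dev/binder", O_RDWR);

	if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0)
		return 1;
	if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		fprintf(stderr, "userspace/kernel protocol mismatch\n");
	printf("binder protocol version %ld\n", (long)vers.protocol_version);
	return 0;
}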
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 00000000000..a342d96ee4f
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,24 @@
+menuconfig ION
+ tristate "Ion Memory Manager"
+ select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
+ ---help---
+ Choose this option to enable the ION Memory Manager,
+ used by Android to efficiently allocate buffers
+ from userspace that can be shared between drivers.
+ If you're not using Android, it's probably safe to
+ say N here.
+
+config ION_TEST
+ tristate "Ion Test Device"
+ depends on ION
+ help
+ Choose this option to create a device that can be used to test the
+ kernel and device side ION functions.
+
+config ION_TEGRA
+ tristate "Ion for Tegra"
+ depends on ARCH_TEGRA && ION
+ help
+ Choose this option if you wish to use ion on an nVidia Tegra.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/staging/android/ion/Makefile
index 9c956659124..75039b98eeb 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
obj-$(CONFIG_ION) += compat_ion.o
endif
diff --git a/drivers/gpu/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index e0d2839952a..e9a8132cd56 100644
--- a/drivers/gpu/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -14,14 +14,14 @@
*
*/
-#include <linux/ion.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include "ion.h"
#include "compat_ion.h"
-/* See include/linux/ion.h for the definition of these structs */
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
struct compat_ion_allocation_data {
compat_size_t len;
compat_size_t align;
@@ -35,6 +35,12 @@ struct compat_ion_custom_data {
compat_ulong_t arg;
};
+#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
+ struct compat_ion_custom_data)
+
static int compat_get_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
@@ -85,8 +91,8 @@ static int compat_get_ion_custom_data(
struct compat_ion_custom_data __user *data32,
struct ion_custom_data __user *data)
{
- compat_uint_t cmd;
- compat_ulong_t arg;
+ compat_uint_t cmd;
+ compat_ulong_t arg;
int err;
err = get_user(cmd, &data32->cmd);
@@ -105,7 +111,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENOTTY;
switch (cmd) {
- case ION_IOC_ALLOC:
+ case COMPAT_ION_IOC_ALLOC:
{
struct compat_ion_allocation_data __user *data32;
struct ion_allocation_data __user *data;
@@ -119,13 +125,12 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
err = compat_get_ion_allocation_data(data32, data);
if (err)
return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, cmd,
+ ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
(unsigned long)data);
err = compat_put_ion_allocation_data(data32, data);
return ret ? ret : err;
}
- case ION_IOC_FREE:
+ case COMPAT_ION_IOC_FREE:
{
struct compat_ion_allocation_data __user *data32;
struct ion_allocation_data __user *data;
@@ -140,10 +145,10 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
return err;
- return filp->f_op->unlocked_ioctl(filp, cmd,
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
(unsigned long)data);
}
- case ION_IOC_CUSTOM: {
+ case COMPAT_ION_IOC_CUSTOM: {
struct compat_ion_custom_data __user *data32;
struct ion_custom_data __user *data;
int err;
@@ -157,7 +162,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
return err;
- return filp->f_op->unlocked_ioctl(filp, cmd,
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
(unsigned long)data);
}
case ION_IOC_SHARE:
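The reason the compat path needs its own COMPAT_ION_IOC_* values is that _IOWR() folds sizeof(type) into the command word, so a 32-bit process computes a different number for the same logical command. A comment-only sketch of the arithmetic, using the names from the hunks above:

#include <linux/ioctl.h>

/*
 * _IOWR(magic, nr, type) packs direction, magic, nr and sizeof(type)
 * into one 32-bit command word:
 *
 *   ION_IOC_ALLOC        = _IOWR(ION_IOC_MAGIC, 0, struct ion_allocation_data)
 *   COMPAT_ION_IOC_ALLOC = _IOWR(ION_IOC_MAGIC, 0, struct compat_ion_allocation_data)
 *
 * The two structs have different sizes, so _IOC_SIZE() differs and the
 * command values diverge. compat_ion_ioctl() therefore matches on the
 * COMPAT_* value coming from 32-bit userspace, converts the struct, and
 * forwards the native command to unlocked_ioctl().
 */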
diff --git a/drivers/gpu/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
index 3a9c8c08c24..3a9c8c08c24 100644
--- a/drivers/gpu/ion/compat_ion.h
+++ b/drivers/staging/android/ion/compat_ion.h
diff --git a/drivers/gpu/ion/ion.c b/drivers/staging/android/ion/ion.c
index e4ffc9d5b94..7522b0be174 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -20,7 +20,6 @@
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
-#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
@@ -37,6 +36,7 @@
#include <linux/dma-buf.h>
#include <linux/idr.h>
+#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
@@ -110,8 +110,8 @@ struct ion_handle {
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
- return ((buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+ return (buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
bool ion_buffer_cached(struct ion_buffer *buffer)
@@ -202,7 +202,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->size = len;
table = heap->ops->map_dma(heap, buffer);
- if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
+ if (WARN_ONCE(table == NULL,
+ "heap->ops->map_dma should return ERR_PTR on error"))
table = ERR_PTR(-EINVAL);
if (IS_ERR(table)) {
heap->ops->free(buffer);
@@ -224,7 +225,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
- for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
+ for (j = 0; j < sg->length / PAGE_SIZE; j++)
buffer->pages[k++] = page++;
}
@@ -383,7 +384,14 @@ static void ion_handle_get(struct ion_handle *handle)
static int ion_handle_put(struct ion_handle *handle)
{
- return kref_put(&handle->ref, ion_handle_destroy);
+ struct ion_client *client = handle->client;
+ int ret;
+
+ mutex_lock(&client->lock);
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+ mutex_unlock(&client->lock);
+
+ return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
@@ -403,34 +411,39 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
return ERR_PTR(-EINVAL);
}
-static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
{
- return idr_find(&client->idr, id);
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = idr_find(&client->idr, id);
+ if (handle)
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+
+ return handle ? handle : ERR_PTR(-EINVAL);
}
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
{
- return (ion_uhandle_get(client, handle->id) == handle);
+ WARN_ON(!mutex_is_locked(&client->lock));
+ return (idr_find(&client->idr, handle->id) == handle);
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
- int rc;
+ int id;
struct rb_node **p = &client->handles.rb_node;
struct rb_node *parent = NULL;
struct ion_handle *entry;
- do {
- int id;
- rc = idr_pre_get(&client->idr, GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
- rc = idr_get_new_above(&client->idr, handle, 1, &id);
- handle->id = id;
- } while (rc == -EAGAIN);
+ id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
- if (rc < 0)
- return rc;
+ handle->id = id;
while (*p) {
parent = *p;
@@ -460,7 +473,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
struct ion_heap *heap;
int ret;
- pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
+ pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
len, align, heap_id_mask, flags);
/*
* traverse the list of heaps available in this system in priority
@@ -468,11 +481,11 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
* request of the caller allocate from it. Repeat until allocate has
* succeeded or all heaps have been tried
*/
- if (WARN_ON(!len))
- return ERR_PTR(-EINVAL);
-
len = PAGE_ALIGN(len);
+ if (!len)
+ return ERR_PTR(-EINVAL);
+
down_read(&dev->lock);
plist_for_each_entry(heap, &dev->heaps, node) {
/* if the caller didn't specify this heap id */
@@ -503,11 +516,11 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
mutex_lock(&client->lock);
ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
if (ret) {
ion_handle_put(handle);
handle = ERR_PTR(ret);
}
- mutex_unlock(&client->lock);
return handle;
}
@@ -527,8 +540,8 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
mutex_unlock(&client->lock);
return;
}
- ion_handle_put(handle);
mutex_unlock(&client->lock);
+ ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
@@ -567,7 +580,8 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
return buffer->vaddr;
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
- if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
+ if (WARN_ONCE(vaddr == NULL,
+ "heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
@@ -658,7 +672,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {0};
+ const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
mutex_lock(&client->lock);
@@ -677,7 +691,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
if (!names[i])
continue;
- seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
+ seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
}
return 0;
}
@@ -769,7 +783,6 @@ void ion_client_destroy(struct ion_client *client)
ion_handle_destroy(&handle->ref);
}
- idr_remove_all(&client->idr);
idr_destroy(&client->idr);
down_write(&dev->lock);
@@ -823,6 +836,22 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
}
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+ * for the the targeted device, but this works on the currently targeted
+ * hardware.
+ */
+ sg_dma_address(&sg) = page_to_phys(page);
+ dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
struct ion_vma_list {
struct list_head list;
struct vm_area_struct *vma;
@@ -847,7 +876,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct page *page = buffer->pages[i];
if (ion_buffer_page_is_dirty(page))
- __dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
+ ion_pages_sync_for_device(dev, ion_buffer_page(page),
+ PAGE_SIZE, dir);
+
ion_buffer_page_clean(buffer->pages + i);
}
list_for_each_entry(vma_list, &buffer->vmas, list) {
@@ -859,17 +890,18 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
mutex_unlock(&buffer->lock);
}
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
+ unsigned long pfn;
int ret;
mutex_lock(&buffer->lock);
ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
-
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- ion_buffer_page(buffer->pages[vmf->pgoff]));
+
+ pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
mutex_unlock(&buffer->lock);
if (ret)
return VM_FAULT_ERROR;
@@ -910,7 +942,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
mutex_unlock(&buffer->lock);
}
-struct vm_operations_struct ion_vma_ops = {
+static struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
@@ -928,6 +960,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
}
if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+ VM_DONTDUMP;
vma->vm_private_data = buffer;
vma->vm_ops = &ion_vma_ops;
ion_vm_open(vma);
@@ -999,7 +1033,7 @@ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
mutex_unlock(&buffer->lock);
}
-struct dma_buf_ops dma_buf_ops = {
+static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
@@ -1021,14 +1055,15 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
- mutex_unlock(&client->lock);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
-
buffer = handle->buffer;
ion_buffer_get(buffer);
+ mutex_unlock(&client->lock);
+
dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
if (IS_ERR(dmabuf)) {
ion_buffer_put(buffer);
@@ -1081,18 +1116,24 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
handle = ion_handle_lookup(client, buffer);
if (!IS_ERR(handle)) {
ion_handle_get(handle);
+ mutex_unlock(&client->lock);
goto end;
}
+ mutex_unlock(&client->lock);
+
handle = ion_handle_create(client, buffer);
if (IS_ERR(handle))
goto end;
+
+ mutex_lock(&client->lock);
ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
if (ret) {
ion_handle_put(handle);
handle = ERR_PTR(ret);
}
+
end:
- mutex_unlock(&client->lock);
dma_buf_put(dmabuf);
return handle;
}
@@ -1122,110 +1163,120 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
return 0;
}
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ } data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
switch (cmd) {
case ION_IOC_ALLOC:
{
- struct ion_allocation_data data;
struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- handle = ion_alloc(client, data.len, data.align,
- data.heap_id_mask, data.flags);
-
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
if (IS_ERR(handle))
return PTR_ERR(handle);
- data.handle = handle->id;
+ data.allocation.handle = handle->id;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- ion_free(client, handle);
- return -EFAULT;
- }
+ cleanup_handle = handle;
break;
}
case ION_IOC_FREE:
{
- struct ion_handle_data data;
struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_handle_data)))
- return -EFAULT;
- mutex_lock(&client->lock);
- handle = ion_uhandle_get(client, data.handle);
- mutex_unlock(&client->lock);
- if (!handle)
- return -EINVAL;
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
ion_free(client, handle);
+ ion_handle_put(handle);
break;
}
case ION_IOC_SHARE:
case ION_IOC_MAP:
{
- struct ion_fd_data data;
struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- handle = ion_uhandle_get(client, data.handle);
- data.fd = ion_share_dma_buf_fd(client, handle);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
- return -EFAULT;
- if (data.fd < 0)
- return data.fd;
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
break;
}
case ION_IOC_IMPORT:
{
- struct ion_fd_data data;
struct ion_handle *handle;
- int ret = 0;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- handle = ion_import_dma_buf(client, data.fd);
+ handle = ion_import_dma_buf(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
else
- data.handle = handle->id;
-
- if (copy_to_user((void __user *)arg, &data,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- if (ret < 0)
- return ret;
+ data.handle.handle = handle->id;
break;
}
case ION_IOC_SYNC:
{
- struct ion_fd_data data;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- ion_sync_for_device(client, data.fd);
+ ret = ion_sync_for_device(client, data.fd.fd);
break;
}
case ION_IOC_CUSTOM:
{
- struct ion_device *dev = client->dev;
- struct ion_custom_data data;
-
if (!dev->custom_ioctl)
return -ENOTTY;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_custom_data)))
- return -EFAULT;
- return dev->custom_ioctl(client, data.cmd, data.arg);
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
}
default:
return -ENOTTY;
}
- return 0;
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
}
static int ion_release(struct inode *inode, struct file *file)
@@ -1299,10 +1350,10 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
char task_comm[TASK_COMM_LEN];
get_task_comm(task_comm, client->task);
- seq_printf(s, "%16.s %16u %16u\n", task_comm,
+ seq_printf(s, "%16.s %16u %16zu\n", task_comm,
client->pid, size);
} else {
- seq_printf(s, "%16.s %16u %16u\n", client->name,
+ seq_printf(s, "%16.s %16u %16zu\n", client->name,
client->pid, size);
}
}
@@ -1317,19 +1368,20 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
continue;
total_size += buffer->size;
if (!buffer->handle_count) {
- seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
- buffer->pid, buffer->size, buffer->kmap_cnt,
+ seq_printf(s, "%16.s %16u %16zu %d %d\n",
+ buffer->task_comm, buffer->pid,
+ buffer->size, buffer->kmap_cnt,
atomic_read(&buffer->ref.refcount));
total_orphaned_size += buffer->size;
}
}
mutex_unlock(&dev->buffer_lock);
seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "%16.s %16u\n", "total orphaned",
+ seq_printf(s, "%16.s %16zu\n", "total orphaned",
total_orphaned_size);
- seq_printf(s, "%16.s %16u\n", "total ", total_size);
+ seq_printf(s, "%16.s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- seq_printf(s, "%16.s %16u\n", "deferred free",
+ seq_printf(s, "%16.s %16zu\n", "deferred free",
heap->free_list_size);
seq_printf(s, "----------------------------------------------------\n");
@@ -1354,39 +1406,39 @@ static const struct file_operations debug_heap_fops = {
#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
- if (!val)
- return 0;
+ if (!val)
+ return 0;
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- sc.nr_to_scan = objs;
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
- heap->shrinker.shrink(&heap->shrinker, &sc);
- return 0;
+ heap->shrinker.shrink(&heap->shrinker, &sc);
+ return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- *val = objs;
- return 0;
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ *val = objs;
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
- debug_shrink_set, "%llu\n");
+ debug_shrink_set, "%llu\n");
#endif
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
@@ -1485,11 +1537,11 @@ void __init ion_reserve(struct ion_platform_data *data)
int ret = memblock_reserve(data->heaps[i].base,
data->heaps[i].size);
if (ret)
- pr_err("memblock reserve of %x@%lx failed\n",
+ pr_err("memblock reserve of %zx@%lx failed\n",
data->heaps[i].size,
data->heaps[i].base);
}
- pr_info("%s: %s reserved base %lx size %d\n", __func__,
+ pr_info("%s: %s reserved base %lx size %zu\n", __func__,
data->heaps[i].name,
data->heaps[i].base,
data->heaps[i].size);
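Seen from userspace, the reworked ion_ioctl() presents the same ABI as before: one struct copied in, and for _IOC_READ commands one struct copied back. A sketch of the allocate-then-share flow; the /dev/ion node, heap id 0, and the include path are assumptions of the example:

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>

#include "uapi/ion.h"	/* assumed include path for this sketch */

static int ion_alloc_dma_buf_fd(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 0,
		.heap_id_mask = 1 << 0,	/* assumed heap id */
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share = { 0 };
	int ion = open("/dev/ion", O_RDWR);

	if (ion < 0 || ioctl(ion, ION_IOC_ALLOC, &alloc) < 0)
		return -1;
	share.handle = alloc.handle;	/* the id-based handle set above */
	if (ioctl(ion, ION_IOC_SHARE, &share) < 0)
		return -1;
	return share.fd;	/* a dma-buf file descriptor */
}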
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644
index 00000000000..dcd2a0cdb19
--- /dev/null
+++ b/drivers/staging/android/ion/ion.h
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+ do not accept phys_addr_t's that would have to */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id: unique identifier for heap. When allocating higher numbers
+ * will be allocated from first. At allocation these are passed
+ * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+ ion_phys_addr_t align;
+ void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocate a client and returns it
+ * @dev: the global ion device
+ * @heap_type_mask: mask of heaps this client can allocate from
+ * @name: used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ * can be heap custom
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client: the client
+ * @handle: the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
+ * @client: the client
+ * @fd: the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+
+#endif /* _LINUX_ION_H */
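A minimal in-kernel sketch of the client API declared above; "example_idev" stands in for the ion_device created by platform code, and heap id 0 is an assumption of the example:

#include <linux/err.h>
#include <linux/mm.h>

#include "ion.h"

static int example_use_ion(struct ion_device *example_idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	struct sg_table *table;

	client = ion_client_create(example_idev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0,
			   ION_FLAG_CACHED);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	table = ion_sg_table(client, handle);	/* sg_table for the buffer */
	(void)table;	/* a real driver would map or DMA from this */

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}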
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 86f35545eaf..5165de2ce34 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -14,19 +14,17 @@
*
*/
#include <linux/spinlock.h>
-
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
-#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include "ion.h"
#include "ion_priv.h"
-#include <asm/mach/map.h>
-
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
@@ -62,7 +60,11 @@ static int ion_carveout_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
- *addr = buffer->priv_phys;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
*len = buffer->size;
return 0;
}
@@ -72,75 +74,66 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
unsigned long size, unsigned long align,
unsigned long flags)
{
- buffer->priv_phys = ion_carveout_allocate(heap, size, align);
- return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_carveout_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
-
- ion_carveout_free(heap, buffer->priv_phys, buffer->size);
- buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
struct sg_table *table;
+ ion_phys_addr_t paddr;
int ret;
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ERR_PTR(ret);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_carveout_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
}
- sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
- 0);
- return table;
-}
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- sg_free_table(buffer->sg_table);
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
}
-void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
- void *ret;
- int mtype = MT_MEMORY_NONCACHED;
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
- if (buffer->flags & ION_FLAG_CACHED)
- mtype = MT_MEMORY;
+ ion_heap_buffer_zero(buffer);
- ret = __arm_ioremap(buffer->priv_phys, buffer->size,
- mtype);
- if (ret == NULL)
- return ERR_PTR(-ENOMEM);
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
- return ret;
+ ion_carveout_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
}
-void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- __arm_iounmap(buffer->vaddr);
- buffer->vaddr = NULL;
- return;
+ return buffer->priv_virt;
}
-int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- return remap_pfn_range(vma, vma->vm_start,
- __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- pgprot_noncached(vma->vm_page_prot));
+ return;
}
static struct ion_heap_ops carveout_heap_ops = {
@@ -149,14 +142,27 @@ static struct ion_heap_ops carveout_heap_ops = {
.phys = ion_carveout_heap_phys,
.map_dma = ion_carveout_heap_map_dma,
.unmap_dma = ion_carveout_heap_unmap_dma,
- .map_user = ion_carveout_heap_map_user,
- .map_kernel = ion_carveout_heap_map_kernel,
- .unmap_kernel = ion_carveout_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_carveout_heap *carveout_heap;
+ int ret;
+
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
if (!carveout_heap)
@@ -172,6 +178,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
-1);
carveout_heap->heap.ops = &carveout_heap_ops;
carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+ carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
return &carveout_heap->heap;
}
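For reference, a sketch of how board code might describe the carveout heap this file implements; the base address, size, and heap id are placeholders, not values taken from this patch:

#include <linux/kernel.h>
#include <linux/sizes.h>

#include "ion.h"

static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id   = 1,
		.name = "carveout",
		.base = 0x90000000,	/* placeholder physical base */
		.size = SZ_16M,		/* placeholder size */
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

/* Passed as platform data; ion_reserve(&example_ion_pdata) would then
 * memblock-reserve the carveout region early in boot. */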
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index cd01aad6ac2..ca20d627960 100644
--- a/drivers/gpu/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -13,20 +13,17 @@
* GNU General Public License for more details.
*
*/
-//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
-#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include "ion.h"
#include "ion_priv.h"
-#include <asm/mach/map.h>
-
struct ion_chunk_heap {
struct ion_heap heap;
struct gen_pool *pool;
@@ -49,8 +46,8 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
unsigned long num_chunks;
unsigned long allocated_size;
- if (ion_buffer_fault_user_mappings(buffer))
- return -ENOMEM;
+ if (align > chunk_heap->chunk_size)
+ return -EINVAL;
allocated_size = ALIGN(size, chunk_heap->chunk_size);
num_chunks = allocated_size / chunk_heap->chunk_size;
@@ -73,7 +70,8 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
chunk_heap->chunk_size);
if (!paddr)
goto err;
- sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
+ sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+ chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
@@ -84,7 +82,7 @@ err:
sg = table->sgl;
for (i -= 1; i >= 0; i--) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
- sg_dma_len(sg));
+ sg->length);
sg = sg_next(sg);
}
sg_free_table(table);
@@ -106,27 +104,27 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
ion_heap_buffer_zero(buffer);
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
for_each_sg(table->sgl, sg, table->nents, i) {
- if (ion_buffer_cached(buffer))
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
- sg_dma_len(sg), DMA_BIDIRECTIONAL);
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
- sg_dma_len(sg));
+ sg->length);
}
chunk_heap->allocated -= allocated_size;
sg_free_table(table);
kfree(table);
}
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
@@ -144,10 +142,18 @@ static struct ion_heap_ops chunk_heap_ops = {
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_chunk_heap *chunk_heap;
- struct vm_struct *vm_struct;
- pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
- int i, ret;
+ int ret;
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
if (!chunk_heap)
@@ -164,39 +170,15 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->size = heap_data->size;
chunk_heap->allocated = 0;
- vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!vm_struct) {
- ret = -ENOMEM;
- goto error;
- }
- for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
- struct page *page = phys_to_page(chunk_heap->base + i);
- struct page **pages = &page;
-
- ret = map_vm_area(vm_struct, pgprot, &pages);
- if (ret)
- goto error_map_vm_area;
- memset(vm_struct->addr, 0, PAGE_SIZE);
- unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
- }
- free_vm_area(vm_struct);
-
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
- heap_data->size, DMA_BIDIRECTIONAL);
gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_info("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+ heap_data->size, heap_data->align);
return &chunk_heap->heap;
-error_map_vm_area:
- free_vm_area(vm_struct);
-error:
- gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
kfree(chunk_heap);
return ERR_PTR(ret);
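
The chunk heap is genalloc plus arithmetic: a request is rounded up to a whole
number of chunks, and each chunk becomes one scatterlist entry carved out of a
genpool seeded with the reserved region. A sketch of that pattern using the
same genalloc calls as this file; the demo function and its base/size/chunk_size
parameters are illustrative, not from the patch:

/* kernel-side sketch; needs <linux/genalloc.h> and <linux/mm.h> */
static int chunk_pool_demo(unsigned long base, size_t size, size_t chunk_size)
{
	struct gen_pool *pool;
	unsigned long paddr;

	/* minimum allocation granularity is one chunk (log2 of bytes) */
	pool = gen_pool_create(get_order(chunk_size) + PAGE_SHIFT, -1);
	if (!pool)
		return -ENOMEM;
	gen_pool_add(pool, base, size, -1);	/* hand over the carveout */

	paddr = gen_pool_alloc(pool, chunk_size);	/* one chunk per sg entry */
	if (!paddr) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}
	gen_pool_free(pool, paddr, chunk_size);
	gen_pool_destroy(pool);
	return 0;
}
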
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 1eaa8c11e04..4418bda7647 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -16,13 +16,12 @@
*/
#include <linux/device.h>
-#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
-/* for ion_heap_ops structure */
+#include "ion.h"
#include "ion_priv.h"
#define ION_CMA_ALLOCATE_FAILED -1
@@ -45,8 +44,8 @@ struct ion_cma_buffer_info {
* This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
*/
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
{
struct page *page = virt_to_page(cpu_addr);
int ret;
@@ -70,13 +69,20 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+ if (buffer->flags & ION_FLAG_CACHED)
+ return -EINVAL;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info) {
dev_err(dev, "Can't allocate buffer info\n");
return ION_CMA_ALLOCATE_FAILED;
}
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 0);
+ info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
@@ -129,8 +135,8 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
- info->handle);
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
*addr = info->handle;
*len = buffer->size;
@@ -138,16 +144,16 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
return 0;
}
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
return info->table;
}
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
@@ -163,13 +169,19 @@ static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
buffer->size);
}
-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
/* kernel memory mapping has been done at allocation time */
return info->cpu_addr;
}
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
@@ -178,6 +190,7 @@ static struct ion_heap_ops ion_cma_ops = {
.phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
+ .unmap_kernel = ion_cma_unmap_kernel,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
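
Underneath, the CMA heap is a thin wrapper around the coherent DMA API, and the
new GFP flags ask for zeroed, highmem-capable pages. A minimal sketch of the
alloc/free pair the heap relies on (the demo function name is illustrative):

/* kernel-side sketch; needs <linux/dma-mapping.h> */
static void cma_demo(struct device *dev, size_t len)
{
	dma_addr_t handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, len, &handle,
				      GFP_HIGHUSER | __GFP_ZERO);
	if (!cpu_addr)
		return;
	/* cpu_addr is the kernel mapping, handle is the device address */
	dma_free_coherent(dev, len, cpu_addr, handle);
}
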
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 786302de7ed..5b01e9e30a4 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -16,13 +16,13 @@
#include <linux/err.h>
#include <linux/freezer.h>
-#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include "ion.h"
#include "ion_priv.h"
void *ion_heap_map_kernel(struct ion_heap *heap,
@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
struct page **tmp = pages;
if (!pages)
- return 0;
+ return NULL;
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
@@ -46,12 +46,11 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
pgprot = pgprot_writecombine(PAGE_KERNEL);
for_each_sg(table->sgl, sg, table->nents, i) {
- int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+ int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
struct page *page = sg_page(sg);
BUG_ON(i >= npages);
- for (j = 0; j < npages_this_entry; j++) {
+ for (j = 0; j < npages_this_entry; j++)
*(tmp++) = page++;
- }
}
vaddr = vmap(pages, npages, VM_MAP, pgprot);
vfree(pages);
@@ -76,23 +75,26 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
struct scatterlist *sg;
int i;
+ int ret;
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
unsigned long remainder = vma->vm_end - addr;
- unsigned long len = sg_dma_len(sg);
+ unsigned long len = sg->length;
- if (offset >= sg_dma_len(sg)) {
- offset -= sg_dma_len(sg);
+ if (offset >= sg->length) {
+ offset -= sg->length;
continue;
} else if (offset) {
page += offset / PAGE_SIZE;
- len = sg_dma_len(sg) - offset;
+ len = sg->length - offset;
offset = 0;
}
len = min(len, remainder);
- remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
vma->vm_page_prot);
+ if (ret)
+ return ret;
addr += len;
if (addr >= vma->vm_end)
return 0;
@@ -100,71 +102,63 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
return 0;
}
-int ion_heap_buffer_zero(struct ion_buffer *buffer)
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
- struct sg_table *table = buffer->sg_table;
- pgprot_t pgprot;
- struct scatterlist *sg;
- struct vm_struct *vm_struct;
- int i, j, ret = 0;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!vm_struct)
+ void *addr = vm_map_ram(pages, num, -1, pgprot);
+ if (!addr)
return -ENOMEM;
+ memset(addr, 0, PAGE_SIZE * num);
+ vm_unmap_ram(addr, num);
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long len = sg_dma_len(sg);
+ return 0;
+}
- for (j = 0; j < len / PAGE_SIZE; j++) {
- struct page *sub_page = page + j;
- struct page **pages = &sub_page;
- ret = map_vm_area(vm_struct, pgprot, &pages);
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int p = 0;
+ int ret = 0;
+ struct sg_page_iter piter;
+ struct page *pages[32];
+
+ for_each_sg_page(sgl, &piter, nents, 0) {
+ pages[p++] = sg_page_iter_page(&piter);
+ if (p == ARRAY_SIZE(pages)) {
+ ret = ion_heap_clear_pages(pages, p, pgprot);
if (ret)
- goto end;
- memset(vm_struct->addr, 0, PAGE_SIZE);
- unmap_kernel_range((unsigned long)vm_struct->addr,
- PAGE_SIZE);
+ return ret;
+ p = 0;
}
}
-end:
- free_vm_area(vm_struct);
+ if (p)
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+
return ret;
}
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
- unsigned int order)
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
- struct page *page = alloc_pages(gfp_flags, order);
-
- if (!page)
- return page;
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
- if (ion_buffer_fault_user_mappings(buffer))
- split_page(page, order);
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
- return page;
+ return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}
-void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
- unsigned int order)
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
- int i;
+ struct scatterlist sg;
- if (!ion_buffer_fault_user_mappings(buffer)) {
- __free_pages(page, order);
- return;
- }
- for (i = 0; i < (1 << order); i++)
- __free_page(page + i);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ return ion_heap_sglist_zero(&sg, 1, pgprot);
}
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
rt_mutex_lock(&heap->lock);
list_add(&buffer->list, &heap->free_list);
@@ -200,16 +194,16 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
if (total_drained >= size)
break;
list_del(&buffer->list);
- ion_buffer_destroy(buffer);
heap->free_list_size -= buffer->size;
total_drained += buffer->size;
+ ion_buffer_destroy(buffer);
}
rt_mutex_unlock(&heap->lock);
return total_drained;
}
-int ion_heap_deferred_free(void *data)
+static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
@@ -281,7 +275,7 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
}
if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %lu size %u\n",
+ pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
__func__, heap_data->name, heap_data->type,
heap_data->base, heap_data->size);
return ERR_PTR(-EINVAL);
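
ion_heap.c also hosts the deferred-free machinery used elsewhere in this
series: a heap sets ION_HEAP_FLAG_DEFER_FREE (as the carveout and chunk heaps
above now do), freed buffers land on heap->free_list, and a kthread drains
them. A hedged sketch of the opt-in; the ops table and heap type are
placeholders, and the assumption that the core calls
ion_heap_init_deferred_free() for flagged heaps follows the declarations in
ion_priv.h below:

static struct ion_heap_ops demo_ops;	/* hypothetical ops table */

static struct ion_heap *demo_heap_create(void)
{
	struct ion_heap *heap = kzalloc(sizeof(*heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &demo_ops;
	heap->type = ION_HEAP_TYPE_CUSTOM;
	heap->flags = ION_HEAP_FLAG_DEFER_FREE;	/* frees run on the heap kthread */
	return heap;
}
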
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 7e00f51292e..f087a02770a 100644
--- a/drivers/gpu/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -34,13 +34,8 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
if (!page)
return NULL;
- /* this is only being used to flush the page for dma,
- this api is not really suitable for calling from a driver
- but no better way to flush a page for dma exist at this time */
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(page)),
- PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
return page;
}
@@ -113,7 +108,7 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool)
return page;
}
-void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
int ret;
@@ -139,7 +134,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int i;
bool high;
- high = gfp_mask & __GFP_HIGHMEM;
+ high = !!(gfp_mask & __GFP_HIGHMEM);
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
@@ -148,10 +143,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
struct page *page;
mutex_lock(&pool->mutex);
- if (high && pool->high_count) {
- page = ion_page_pool_remove(pool, true);
- } else if (pool->low_count) {
+ if (pool->low_count) {
page = ion_page_pool_remove(pool, false);
+ } else if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
} else {
mutex_unlock(&pool->mutex);
break;
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 32461e94673..19691c0d24c 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,7 +17,7 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
-#include <linux/ion.h>
+#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
@@ -26,6 +26,8 @@
#include <linux/shrinker.h>
#include <linux/types.h>
+#include "ion.h"
+
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
/**
@@ -213,19 +215,7 @@ void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
-
-/**
- * ion_heap_alloc_pages - allocate pages from alloc_pages
- * @buffer: the buffer to allocate for, used to extract the flags
- * @gfp_flags: the gfp_t for the allocation
- * @order: the order of the allocatoin
- *
- * This funciton allocations from alloc pages and also does any other
- * necessary operations based on the buffer->flags. For buffers which
- * will be faulted in the pages are split using split_page
- */
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
- unsigned int order);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
@@ -240,7 +230,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap);
/**
* ion_heap_freelist_add - add a buffer to the deferred free list
* @heap: the heap
- * @buffer: the buffer
+ * @buffer: the buffer
*
* Adds an item to the deferred freelist.
*/
@@ -356,4 +346,15 @@ void ion_page_pool_free(struct ion_page_pool *, struct page *);
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan);
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ * device
+ * @dev: the device the pages will be used with
+ * @page: the first page to be flushed
+ * @size: size in bytes of region to be flushed
+ * @dir: direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir);
+
#endif /* _ION_PRIV_H */
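
One plausible body for the new helper, consistent with the
dma_sync_sg_for_device() call the chunk heap now makes; the real definition
lives in ion.c, outside this excerpt:

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/* assumption: a physically contiguous range, so one entry suffices */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
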
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 5fe81a76f2f..05e9dc93980 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -18,19 +18,17 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
-#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include "ion.h"
#include "ion_priv.h"
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN | __GFP_NORETRY) &
- ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN);
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
@@ -74,15 +72,14 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
if (order > 4)
gfp_flags = high_order_gfp_flags;
- page = ion_heap_alloc_pages(buffer, gfp_flags, order);
+ page = alloc_pages(gfp_flags, order);
if (!page)
- return 0;
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(page)),
- PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
}
if (!page)
- return 0;
+ return NULL;
return page;
}
@@ -92,15 +89,10 @@ static void free_buffer_page(struct ion_system_heap *heap,
unsigned int order)
{
bool cached = ion_buffer_cached(buffer);
- bool split_pages = ion_buffer_fault_user_mappings(buffer);
- int i;
if (!cached) {
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
ion_page_pool_free(pool, page);
- } else if (split_pages) {
- for (i = 0; i < (1 << order); i++)
- __free_page(page + i);
} else {
__free_pages(page, order);
}
@@ -151,9 +143,13 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
- info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
+ max_order);
if (!info)
goto err;
list_add_tail(&info->list, &pages);
@@ -161,8 +157,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
max_order = info->order;
i++;
}
-
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
goto err;
@@ -184,14 +179,14 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
err1:
kfree(table);
err:
- list_for_each_entry(info, &pages, list) {
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
free_buffer_page(sys_heap, buffer, info->page, info->order);
kfree(info);
}
return -ENOMEM;
}
-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct ion_system_heap *sys_heap = container_of(heap,
@@ -210,19 +205,19 @@ void ion_system_heap_free(struct ion_buffer *buffer)
for_each_sg(table->sgl, sg, table->nents, i)
free_buffer_page(sys_heap, buffer, sg_page(sg),
- get_order(sg_dma_len(sg)));
+ get_order(sg->length));
sg_free_table(table);
kfree(table);
}
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
@@ -363,61 +358,83 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
unsigned long align,
unsigned long flags)
{
- buffer->priv_virt = kzalloc(len, GFP_KERNEL);
- if (!buffer->priv_virt)
+ int order = get_order(len);
+ struct page *page;
+ struct sg_table *table;
+ unsigned long i;
+ int ret;
+
+ if (align > (PAGE_SIZE << order))
+ return -EINVAL;
+
+ page = alloc_pages(low_order_gfp_flags, order);
+ if (!page)
return -ENOMEM;
+
+ split_page(page, order);
+
+ len = PAGE_ALIGN(len);
+ for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+ __free_page(page + i);
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_set_page(table->sgl, page, len, 0);
+
+ buffer->priv_virt = table;
+
+ ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
return 0;
+
+out:
+ for (i = 0; i < len >> PAGE_SHIFT; i++)
+ __free_page(page + i);
+ kfree(table);
+ return ret;
}
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
- kfree(buffer->priv_virt);
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+ unsigned long i;
+
+ for (i = 0; i < pages; i++)
+ __free_page(page + i);
+ sg_free_table(table);
+ kfree(table);
}
static int ion_system_contig_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
- *addr = virt_to_phys(buffer->priv_virt);
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
*len = buffer->size;
return 0;
}
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct sg_table *table;
- int ret;
-
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ERR_PTR(ret);
- }
- sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
- 0);
- return table;
-}
-
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
+ return buffer->priv_virt;
}
-int ion_system_contig_heap_map_user(struct ion_heap *heap,
- struct ion_buffer *buffer,
- struct vm_area_struct *vma)
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
- return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-
}
static struct ion_heap_ops kmalloc_ops = {
@@ -428,7 +445,7 @@ static struct ion_heap_ops kmalloc_ops = {
.unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_system_contig_heap_map_user,
+ .map_user = ion_heap_map_user,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
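
For reference, the order walk this heap uses can be paraphrased in a few
lines: take the largest order in {8, 4, 0} that still fits the remaining size
and does not exceed the order of the previous success, so pages land in the sg
list largest-first. A sketch (the demo function name is illustrative; the gfp
variables are the file's own):

static struct page *demo_alloc_largest(unsigned long size,
				       unsigned int max_order)
{
	static const unsigned int orders[] = {8, 4, 0};
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;		/* too big for what is left */
		if (max_order < orders[i])
			continue;		/* keep the sg list sorted */
		return alloc_pages(orders[i] > 4 ? high_order_gfp_flags :
				   low_order_gfp_flags, orders[i]);
	}
	return NULL;
}
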
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644
index 00000000000..3e20349baf7
--- /dev/null
+++ b/drivers/staging/android/ion/ion_test.c
@@ -0,0 +1,281 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+ struct miscdevice misc;
+};
+
+struct ion_test_data {
+ struct dma_buf *dma_buf;
+ struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+ void __user *ptr, size_t offset, size_t size, bool write)
+{
+ int ret = 0;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct sg_page_iter sg_iter;
+ unsigned long offset_page;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach))
+ return PTR_ERR(attach);
+
+ table = dma_buf_map_attachment(attach, dir);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ offset_page = offset >> PAGE_SHIFT;
+ offset %= PAGE_SIZE;
+
+ for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+ size_t to_copy = PAGE_SIZE - offset;
+
+ to_copy = min(to_copy, size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (write)
+ ret = copy_from_user(vaddr + offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+ vunmap(vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ size -= to_copy;
+ if (!size)
+ break;
+ ptr += to_copy;
+ offset = 0;
+ }
+
+err:
+ dma_buf_unmap_attachment(attach, table, dir);
+ dma_buf_detach(dma_buf, attach);
+ return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+ size_t offset, size_t size, bool write)
+{
+ int ret;
+ unsigned long page_offset = offset >> PAGE_SHIFT;
+ size_t copy_offset = offset % PAGE_SIZE;
+ size_t copy_size = size;
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (offset > dma_buf->size || size > dma_buf->size - offset)
+ return -EINVAL;
+
+ ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+ if (ret)
+ return ret;
+
+ while (copy_size > 0) {
+ size_t to_copy;
+ void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+		if (!vaddr) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+ to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+ if (write)
+ ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+ dma_buf_kunmap(dma_buf, page_offset, vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ copy_size -= to_copy;
+ ptr += to_copy;
+ page_offset++;
+ copy_offset = 0;
+ }
+err:
+ dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+ return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ion_test_data *test_data = filp->private_data;
+ int ret = 0;
+
+ union {
+ struct ion_test_rw_data test_rw;
+ } data;
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_TEST_SET_FD:
+ {
+ struct dma_buf *dma_buf = NULL;
+ int fd = arg;
+
+ if (fd >= 0) {
+ dma_buf = dma_buf_get((int)arg);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+ }
+ if (test_data->dma_buf)
+ dma_buf_put(test_data->dma_buf);
+ test_data->dma_buf = dma_buf;
+ break;
+ }
+ case ION_IOC_TEST_DMA_MAPPING:
+ {
+ ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ case ION_IOC_TEST_KERNEL_MAPPING:
+ {
+ ret = ion_handle_test_kernel(test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data;
+ struct miscdevice *miscdev = file->private_data;
+
+ data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = miscdev->parent;
+
+ file->private_data = data;
+
+ return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data = file->private_data;
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ion_test_ioctl,
+ .open = ion_test_open,
+ .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ion_test_device *testdev;
+
+ testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+ GFP_KERNEL);
+ if (!testdev)
+ return -ENOMEM;
+
+ testdev->misc.minor = MISC_DYNAMIC_MINOR;
+ testdev->misc.name = "ion-test";
+ testdev->misc.fops = &ion_test_fops;
+ testdev->misc.parent = &pdev->dev;
+ ret = misc_register(&testdev->misc);
+ if (ret) {
+ pr_err("failed to register misc device.\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, testdev);
+
+ return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+ .driver = {
+ .name = "ion-test",
+ },
+};
+
+static int __init ion_test_init(void)
+{
+ platform_device_register_simple("ion-test", -1, NULL, 0);
+ return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+ platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
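
From userspace the driver shows up as /dev/ion-test; a dma-buf fd is attached
once and then read or written through either mapping path. A hedged sketch of
a read-back helper (the helper name, error handling, and the relative uapi
include path are illustrative):

/* userspace sketch */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "uapi/ion_test.h"

static int ion_test_read(int dmabuf_fd, void *out, size_t len)
{
	struct ion_test_rw_data rw;
	int fd = open("/dev/ion-test", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, ION_IOC_TEST_SET_FD, dmabuf_fd))
		goto fail;
	memset(&rw, 0, sizeof(rw));
	rw.ptr = (uintptr_t)out;
	rw.size = len;
	rw.write = 0;				/* 0 = copy buffer out to ptr */
	if (ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw))
		goto fail;
	ioctl(fd, ION_IOC_TEST_SET_FD, -1);	/* drop the attachment */
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}
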
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
index 11cd003fb08..11cd003fb08 100644
--- a/drivers/gpu/ion/tegra/Makefile
+++ b/drivers/staging/android/ion/tegra/Makefile
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
index 7af6e168ff4..0849600bcc0 100644
--- a/drivers/gpu/ion/tegra/tegra_ion.c
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -15,9 +15,9 @@
*/
#include <linux/err.h>
-#include <linux/ion.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../ion.h"
#include "../ion_priv.h"
struct ion_device *idev;
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index aba25cbb038..1a50669ec8a 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -18,11 +18,9 @@
#define _LINUX_SW_SYNC_H
#include <linux/types.h>
-
-#ifdef __KERNEL__
-
#include <linux/kconfig.h>
#include "sync.h"
+#include "uapi/sw_sync.h"
struct sw_sync_timeline {
struct sync_timeline obj;
@@ -58,19 +56,4 @@ static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
}
#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
-#endif /* __KERNEL __ */
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-
#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 38ea986dc70..75da9e85ac6 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -14,14 +14,14 @@
#define _LINUX_SYNC_H
#include <linux/types.h>
-#ifdef __KERNEL__
-
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include "uapi/sync.h"
+
struct sync_timeline;
struct sync_pt;
struct sync_fence;
@@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,
*/
int sync_fence_wait(struct sync_fence *fence, long timeout);
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implmenting the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependant data
- */
-struct sync_pt_info {
- __u32 len;
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-
- __u8 driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data reutnred to userspace
- * including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds. Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
-
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644
index 00000000000..aa013f6f5f3
--- /dev/null
+++ b/drivers/staging/android/uapi/android_alarm.h
@@ -0,0 +1,62 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
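
A minimal consumer of this interface just reads one of the alarm clocks; a
hedged sketch, assuming the driver's usual /dev/alarm node and a local copy of
the header:

/* userspace sketch */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include "uapi/android_alarm.h"

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/alarm", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME),
		  &ts) < 0) {
		close(fd);
		return 1;
	}
	printf("elapsed realtime: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}
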
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 00000000000..ba4743c71d6
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
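
Typical ashmem use is open, name, size, then mmap. A hedged sketch (the helper
name is illustrative; a fixed-size name buffer keeps the driver from reading
past a short string):

/* userspace sketch */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "uapi/ashmem.h"

static int ashmem_create(const char *name, size_t size)
{
	char buf[ASHMEM_NAME_LEN] = "";
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0)
		return -1;
	strncpy(buf, name, sizeof(buf) - 1);
	if (ioctl(fd, ASHMEM_SET_NAME, buf) < 0 ||
	    ioctl(fd, ASHMEM_SET_SIZE, size) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* now mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0) */
}
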
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
new file mode 100644
index 00000000000..b6cb483592c
--- /dev/null
+++ b/drivers/staging/android/uapi/binder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void __user *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void __user *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
+#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void __user *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void __user *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum binder_driver_return_protocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum binder_driver_command_protocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, int),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, int),
+ BC_ACQUIRE = _IOW('c', 5, int),
+ BC_RELEASE = _IOW('c', 6, int),
+ BC_DECREFS = _IOW('c', 7, int),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
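The customary first exchange with the driver is a version check; a hedged
sketch (the /dev/binder node and the bail-out policy are conventional, not
mandated by this header):

/* userspace sketch */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "uapi/binder.h"

int main(void)
{
	struct binder_version vers;
	int fd = open("/dev/binder", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
		close(fd);
		return 1;
	}
	printf("binder protocol %ld\n", vers.protocol_version);
	close(fd);
	return 0;
}
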
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644
index 00000000000..f09e7c154d6
--- /dev/null
+++ b/drivers/staging/android/uapi/ion.h
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
+ * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
+ * is used to identify the heaps, so only 32
+ * total heap types are supported
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
+ ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will created
+ at mmap time, if this is set
+ caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_id_mask;
+ unsigned int flags;
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ ion_user_handle_t handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf API correctly (syncing
+ * happens automatically when the buffer is mapped to a device).
+ * If necessary, use it after touching a cached buffer from the cpu;
+ * it makes the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
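
Putting the ioctls above together, the usual allocate-and-export flow is:
ION_IOC_ALLOC fills in the handle, ION_IOC_SHARE turns it into a dma-buf fd,
and ION_IOC_FREE can then drop the handle because the fd keeps the buffer
alive. A hedged sketch (the helper name and error handling are illustrative;
heap_id_mask assumes heap ids mirror the type masks, as they commonly do):

/* userspace sketch */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "uapi/ion.h"

static int ion_alloc_dmabuf(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 0,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share;
	struct ion_handle_data hd;
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return -1;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto fail;
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		goto fail;
	hd.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &hd);	/* the fd still pins the buffer */
	close(ion_fd);
	return share.fd;			/* mmap it or pass it on */
fail:
	close(ion_fd);
	return -1;
}
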
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 00000000000..352379a0269
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read handle
+ * @ptr: a pointer to an area at least as large as size
+ * @offset: offset into the ion buffer to start reading
+ * @size: size to read or write
+ * @write: 1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+ __u64 ptr;
+ __u64 offset;
+ __u64 size;
+ int write;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma-buf fd to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+ _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as close as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 00000000000..9b5d4869505
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
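
A hedged sketch of the flow these two ioctls enable, assuming the conventional /dev/sw_sync device where each open() creates a fresh timeline; error handling is omitted and the helper names are invented for illustration.

#include <string.h>
#include <sys/ioctl.h>

#include "sw_sync.h"

/* Create a fence that signals once the timeline counter reaches `value`. */
static int timeline_create_fence(int timeline_fd, __u32 value)
{
	struct sw_sync_create_fence_data data;

	memset(&data, 0, sizeof(data));
	data.value = value;
	strncpy(data.name, "example_fence", sizeof(data.name) - 1);

	if (ioctl(timeline_fd, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return -1;
	return data.fence;	/* fd of the newly created fence */
}

/* Advance the timeline by `count`, signalling any fences it passes. */
static int timeline_inc(int timeline_fd, __u32 count)
{
	return ioctl(timeline_fd, SW_SYNC_IOC_INC, &count);
}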
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 00000000000..57fdaadc4b0
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+ __s32 fence; /* fd of newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver-dependent data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len: ioctl caller writes the size of the buffer it's passing in.
+ * ioctl returns the length of the sync_fence_info_data returned to
+ * userspace, including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass the timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
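
Putting the three fence ioctls together, a minimal userspace sketch; it assumes the fence fds come from a sync-capable driver such as sw_sync, and the helper is illustrative rather than part of the patch.

#include <string.h>
#include <sys/ioctl.h>

#include "sync.h"

/* Wait up to `timeout_ms` for fence1, then merge it with fence2,
 * returning the fd of the combined fence. */
static int wait_then_merge(int fence1, int fence2, __s32 timeout_ms)
{
	struct sync_merge_data merge;

	/* A negative timeout would block indefinitely. */
	if (ioctl(fence1, SYNC_IOC_WAIT, &timeout_ms) < 0)
		return -1;

	memset(&merge, 0, sizeof(merge));
	merge.fd2 = fence2;
	strncpy(merge.name, "merged_fence", sizeof(merge.name) - 1);

	if (ioctl(fence1, SYNC_IOC_MERGE, &merge) < 0)
		return -1;
	return merge.fence;
}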
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index 12fb818ab14..960d64fbd40 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -269,6 +269,17 @@ struct mtp_device_status {
__le16 wCode;
};
+struct mtp_data_header {
+ /* length of packet, including this header */
+ __le32 length;
+ /* container type (2 for data packet) */
+ __le16 type;
+ /* MTP command code */
+ __le16 command;
+ /* MTP transaction ID */
+ __le32 transaction_id;
+};
+
/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;
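
The struct added above is the standard 12-byte MTP data-container header. As a hedged illustration of how it fits together (the helper below is hypothetical, not part of the patch, and relies on the kernel's byteorder helpers already available in f_mtp.c):

/* Hypothetical helper: populate an MTP data container header.
 * Container type 2 marks a data block; length covers header + payload. */
static void mtp_fill_data_header(struct mtp_data_header *hdr,
				 size_t payload_len, u16 code, u32 tid)
{
	hdr->length = cpu_to_le32(sizeof(*hdr) + payload_len);
	hdr->type = cpu_to_le16(2);
	hdr->command = cpu_to_le16(code);
	hdr->transaction_id = cpu_to_le32(tid);
}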
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index bff23d33d66..a3279c7def7 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,8 +23,6 @@ source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/host1x/Kconfig"
-source "drivers/gpu/ion/Kconfig"
-
config VGASTATE
tristate
default n
diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c
index fd5bcde8785..933e74ac809 100644
--- a/drivers/video/adf/adf.c
+++ b/drivers/video/adf/adf.c
@@ -37,6 +37,8 @@
#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
+static DEFINE_IDR(adf_devices);
+
static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
{
/* sync_fence_wait() dumps debug information on timeout. Experience
@@ -455,23 +457,20 @@ static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
struct idr *idr, struct adf_device *parent,
const struct adf_obj_ops *ops, const char *fmt, va_list args)
{
+ int ret;
+
if (ops && ops->supports_event && !ops->set_event) {
pr_err("%s: %s implements supports_event but not set_event\n",
__func__, adf_obj_type_str(type));
return -EINVAL;
}
- if (idr) {
- int ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
- if (ret < 0) {
- pr_err("%s: allocating object id failed: %d\n",
- __func__, ret);
- return ret;
- }
- obj->id = ret;
- } else {
- obj->id = -1;
+ ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+ return ret;
}
+ obj->id = ret;
vscnprintf(obj->name, sizeof(obj->name), fmt, args);
@@ -498,8 +497,7 @@ static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
}
mutex_destroy(&obj->event_lock);
- if (idr)
- idr_remove(idr, obj->id);
+ idr_remove(idr, obj->id);
}
/**
@@ -543,8 +541,8 @@ int adf_device_init(struct adf_device *dev, struct device *parent,
memset(dev, 0, sizeof(*dev));
va_start(args, fmt);
- ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, NULL, dev, &ops->base,
- fmt, args);
+ ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+ &ops->base, fmt, args);
va_end(args);
if (ret < 0)
return ret;
@@ -612,7 +610,7 @@ void adf_device_destroy(struct adf_device *dev)
}
mutex_destroy(&dev->post_lock);
mutex_destroy(&dev->client_lock);
- adf_obj_destroy(&dev->base, NULL);
+ adf_obj_destroy(&dev->base, &adf_devices);
}
EXPORT_SYMBOL(adf_device_destroy);
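
The net effect of these hunks is that devices now draw their ids from the global adf_devices idr instead of carrying a sentinel -1, so adf_obj_init() and adf_obj_destroy() no longer need a NULL-idr branch. In isolation the pattern is (sketch with hypothetical names):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(example_ids);

static int example_alloc_id(void *obj)
{
	/* Returns the smallest free id >= 0, or a negative errno. */
	return idr_alloc(&example_ids, obj, 0, 0, GFP_KERNEL);
}

static void example_free_id(int id)
{
	idr_remove(&example_ids, id);
}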
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
index e4a79213507..bba873d34bb 100644
--- a/drivers/video/adf/adf_client.c
+++ b/drivers/video/adf/adf_client.c
@@ -49,6 +49,9 @@ int adf_interface_blank(struct adf_interface *intf, u8 state)
if (!intf->ops || !intf->ops->blank)
return -EOPNOTSUPP;
+ if (state > DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
mutex_lock(&dev->client_lock);
if (state != DRM_MODE_DPMS_ON)
flush_kthread_worker(&dev->post_worker);
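
The new bounds check works because the DPMS levels defined in drm_mode.h form a contiguous range, so any value above DRM_MODE_DPMS_OFF cannot be a valid state:

#define DRM_MODE_DPMS_ON	0
#define DRM_MODE_DPMS_STANDBY	1
#define DRM_MODE_DPMS_SUSPEND	2
#define DRM_MODE_DPMS_OFF	3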
diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c
index 477abd63ccc..cac34d14cbc 100644
--- a/drivers/video/adf/adf_fbdev.c
+++ b/drivers/video/adf/adf_fbdev.c
@@ -519,10 +519,10 @@ int adf_fbdev_blank(int blank, struct fb_info *info)
dpms_state = DRM_MODE_DPMS_STANDBY;
break;
case FB_BLANK_VSYNC_SUSPEND:
- dpms_state = DRM_MODE_DPMS_STANDBY;
+ dpms_state = DRM_MODE_DPMS_SUSPEND;
break;
case FB_BLANK_HSYNC_SUSPEND:
- dpms_state = DRM_MODE_DPMS_SUSPEND;
+ dpms_state = DRM_MODE_DPMS_STANDBY;
break;
case FB_BLANK_POWERDOWN:
dpms_state = DRM_MODE_DPMS_OFF;
diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c
index 076ccbd0cd8..8c659c71ffa 100644
--- a/drivers/video/adf/adf_sysfs.c
+++ b/drivers/video/adf/adf_sysfs.c
@@ -105,11 +105,6 @@ static struct device_attribute adf_interface_attrs[] = {
__ATTR_RO(vsync_timestamp),
};
-static char *adf_devnode(struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "adf/%s", dev_name(dev));
-}
-
int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
{
int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
@@ -142,7 +137,7 @@ static char *adf_device_devnode(struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
- return kasprintf(GFP_KERNEL, "adf/%s/device", obj->name);
+ return kasprintf(GFP_KERNEL, "adf%d", obj->id);
}
static char *adf_interface_devnode(struct device *dev, umode_t *mode,
@@ -151,8 +146,8 @@ static char *adf_interface_devnode(struct device *dev, umode_t *mode,
struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
struct adf_interface *intf = adf_obj_to_interface(obj);
struct adf_device *parent = adf_interface_parent(intf);
- return kasprintf(GFP_KERNEL, "adf/%s/interface%d",
- parent->base.name, intf->base.id);
+ return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+ parent->base.id, intf->base.id);
}
static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
@@ -161,8 +156,8 @@ static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
struct adf_device *parent = adf_overlay_engine_parent(eng);
- return kasprintf(GFP_KERNEL, "adf/%s/overlay-engine%d",
- parent->base.name, eng->base.id);
+ return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+ parent->base.id, eng->base.id);
}
static void adf_noop_release(struct device *dev)
@@ -285,7 +280,6 @@ int adf_sysfs_init(void)
goto err_chrdev;
}
- class->devnode = adf_devnode;
adf_class = class;
adf_major = ret;
return 0;
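
With adf_devnode() gone and the devnode callbacks rewritten, the nodes move out of the adf/ directory hierarchy into flat, id-based names directly under /dev. For a single device with one interface and one overlay engine, the resulting nodes would look like (illustrative, derived from the format strings above):

	/dev/adf0
	/dev/adf-interface0.0
	/dev/adf-overlay-engine0.0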