author    Andrey Konovalov <andrey.konovalov@linaro.org>  2012-12-10 17:21:40 +0400
committer Andrey Konovalov <andrey.konovalov@linaro.org>  2012-12-10 17:21:40 +0400
commit    59b6ef45b2216f333b6c0cb7c8eb7dc280f8df6a (patch)
tree      c74ea075285537d96ada60178faf4631c7b45aaa
parent    74a2bcfdba011cb35c201425f27e517c8a2b68e6 (diff)
parent    05a789f13dad6247198edc55423be09236411b76 (diff)
Automatically merging tracking-ll-v3.7-misc-fixes into merge-linux-linaroll-20121210.0
Conflicting files:
-rw-r--r--  arch/arm/include/asm/bitsperlong.h | 1
-rw-r--r--  arch/arm/mm/mmu.c | 2
-rw-r--r--  arch/arm64/include/asm/bitsperlong.h | 1
-rw-r--r--  drivers/Makefile | 3
-rw-r--r--  drivers/base/sw_sync.c | 50
-rw-r--r--  drivers/base/sync.c | 315
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c | 224
-rw-r--r--  drivers/gpu/ion/Kconfig | 1
-rw-r--r--  drivers/gpu/ion/Makefile | 3
-rw-r--r--  drivers/gpu/ion/ion.c | 1020
-rw-r--r--  drivers/gpu/ion/ion_carveout_heap.c | 28
-rw-r--r--  drivers/gpu/ion/ion_page_pool.c | 281
-rw-r--r--  drivers/gpu/ion/ion_priv.h | 105
-rw-r--r--  drivers/gpu/ion/ion_system_heap.c | 394
-rw-r--r--  drivers/power/Kconfig | 10
-rw-r--r--  drivers/power/Makefile | 1
-rw-r--r--  drivers/power/android_battery.c | 767
-rw-r--r--  drivers/staging/android/logger.c | 5
-rw-r--r--  drivers/usb/gadget/android.c | 284
-rw-r--r--  drivers/usb/gadget/composite.c | 5
-rw-r--r--  drivers/usb/gadget/f_accessory.c | 402
-rw-r--r--  drivers/usb/gadget/f_audio_source.c | 828
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 11
-rw-r--r--  drivers/usb/gadget/rndis.c | 11
-rw-r--r--  include/linux/ion.h | 116
-rw-r--r--  include/linux/platform_data/android_battery.h | 47
-rw-r--r--  include/linux/sync.h | 70
-rw-r--r--  include/linux/usb/f_accessory.h | 65
-rw-r--r--  include/trace/events/cpufreq_interactive.h | 36
-rw-r--r--  include/trace/events/sync.h | 82
-rw-r--r--  kernel/sched/fair.c | 130
-rw-r--r--  linaro/configs/linaro-base.conf | 1
-rw-r--r--  linaro/configs/omap4.conf | 2
-rw-r--r--  linaro/configs/ubuntu.conf | 1
-rw-r--r--  net/netfilter/xt_qtaguid.c | 14
-rw-r--r--  net/netfilter/xt_quota2.c | 5
-rwxr-xr-x  scripts/kconfig/merge_config.sh | 20
-rw-r--r--  tools/perf/compat-android.h | 29
-rw-r--r--  tools/perf/util/pager.c | 1
-rw-r--r--  tools/perf/util/util.h | 2
40 files changed, 4520 insertions(+), 853 deletions(-)
diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h
new file mode 100644
index 000000000000..6dc0bb0c13b2
--- /dev/null
+++ b/arch/arm/include/asm/bitsperlong.h
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9eb7a4e9330b..c35bbdadd25c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -587,7 +587,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte_t *pte = start_pte + pte_index(addr);
/* If replacing a section mapping, the whole section must be replaced */
- BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+ BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
do {
set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
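The tightened check above only trips when an existing section mapping (a
non-empty, "bad" pmd) would be partially overwritten: (addr | end) & ~PMD_MASK
is nonzero exactly when either boundary is not section-aligned. A standalone
sketch of that mask trick, assuming 2 MiB PMD coverage (PMD_SHIFT = 21, as on
classic two-level ARM paging) purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PMD_SHIFT 21				/* assumed: 2 MiB per pmd */
#define PMD_MASK  (~((1UL << PMD_SHIFT) - 1))

int main(void)
{
	unsigned long addr = 0x40000000UL;	/* section-aligned start */
	unsigned long end  = 0x40200000UL;	/* next section boundary */
	unsigned long mid  = 0x40100000UL;	/* splits the section    */

	/* zero iff both ends sit on section boundaries */
	printf("whole section: %#lx\n", (addr | end) & ~PMD_MASK);
	printf("partial:       %#lx\n", (addr | mid) & ~PMD_MASK);
	return 0;
}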
diff --git a/arch/arm64/include/asm/bitsperlong.h b/arch/arm64/include/asm/bitsperlong.h
new file mode 100644
index 000000000000..6dc0bb0c13b2
--- /dev/null
+++ b/arch/arm64/include/asm/bitsperlong.h
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/drivers/Makefile b/drivers/Makefile
index 957c44ea6603..487d933d1c66 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -147,7 +147,4 @@ obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
-# IRQ chips
-obj-y += irqchip/
-
obj-$(CONFIG_GATOR) += gator/
diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c
index 21ddf4ffd589..b4d8529ee892 100644
--- a/drivers/base/sw_sync.c
+++ b/drivers/base/sw_sync.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
@@ -42,6 +43,7 @@ struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
return (struct sync_pt *)pt;
}
+EXPORT_SYMBOL(sw_sync_pt_create);
static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
{
@@ -69,23 +71,6 @@ static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
return sw_sync_cmp(pt_a->value, pt_b->value);
}
-static void sw_sync_print_obj(struct seq_file *s,
- struct sync_timeline *sync_timeline)
-{
- struct sw_sync_timeline *obj = (struct sw_sync_timeline *)sync_timeline;
-
- seq_printf(s, "%d", obj->value);
-}
-
-static void sw_sync_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt->parent;
-
- seq_printf(s, "%d / %d", pt->value, obj->value);
-}
-
static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
void *data, int size)
{
@@ -99,14 +84,29 @@ static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
return sizeof(pt->value);
}
+static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+ char *str, int size)
+{
+ struct sw_sync_timeline *timeline =
+ (struct sw_sync_timeline *)sync_timeline;
+ snprintf(str, size, "%d", timeline->value);
+}
+
+static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
+ char *str, int size)
+{
+ struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+ snprintf(str, size, "%d", pt->value);
+}
+
struct sync_timeline_ops sw_sync_timeline_ops = {
.driver_name = "sw_sync",
.dup = sw_sync_pt_dup,
.has_signaled = sw_sync_pt_has_signaled,
.compare = sw_sync_pt_compare,
- .print_obj = sw_sync_print_obj,
- .print_pt = sw_sync_print_pt,
.fill_driver_data = sw_sync_fill_driver_data,
+ .timeline_value_str = sw_sync_timeline_value_str,
+ .pt_value_str = sw_sync_pt_value_str,
};
@@ -119,6 +119,7 @@ struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
return obj;
}
+EXPORT_SYMBOL(sw_sync_timeline_create);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
{
@@ -126,7 +127,7 @@ void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
sync_timeline_signal(&obj->obj);
}
-
+EXPORT_SYMBOL(sw_sync_timeline_inc);
#ifdef CONFIG_SW_SYNC_USER
/* *WARNING*
@@ -166,8 +167,13 @@ long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
struct sync_fence *fence;
struct sw_sync_create_fence_data data;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
pt = sw_sync_pt_create(obj, data.value);
if (pt == NULL) {
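The sw_sync changes above replace the seq_file-based print_obj/print_pt debug
hooks with fixed-buffer timeline_value_str/pt_value_str callbacks; the sync
core (see the sync.c diff below) now formats the "pt / timeline" pair itself.
A minimal sketch of the same pattern in a hypothetical driver, with
my_timeline and my_pt as illustrative types:

#include <linux/kernel.h>
#include <linux/sync.h>

struct my_timeline { struct sync_timeline obj; u32 value; };
struct my_pt       { struct sync_pt pt;        u32 value; };

static void my_timeline_value_str(struct sync_timeline *sync_timeline,
				  char *str, int size)
{
	struct my_timeline *tl = (struct my_timeline *)sync_timeline;

	snprintf(str, size, "%u", tl->value);	/* current counter value */
}

static void my_pt_value_str(struct sync_pt *sync_pt, char *str, int size)
{
	struct my_pt *pt = (struct my_pt *)sync_pt;

	snprintf(str, size, "%u", pt->value);	/* value this pt fires at */
}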
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
index cc40e4c9e653..809d02b21e08 100644
--- a/drivers/base/sync.c
+++ b/drivers/base/sync.c
@@ -15,6 +15,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -27,8 +28,13 @@
#include <linux/anon_inodes.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/sync.h>
+
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
+static void sync_fence_free(struct kref *kref);
+static void sync_dump(void);
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
@@ -49,6 +55,7 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
if (obj == NULL)
return NULL;
+ kref_init(&obj->kref);
obj->ops = ops;
strlcpy(obj->name, name, sizeof(obj->name));
@@ -64,9 +71,12 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
return obj;
}
+EXPORT_SYMBOL(sync_timeline_create);
-static void sync_timeline_free(struct sync_timeline *obj)
+static void sync_timeline_free(struct kref *kref)
{
+ struct sync_timeline *obj =
+ container_of(kref, struct sync_timeline, kref);
unsigned long flags;
if (obj->ops->release_obj)
@@ -81,19 +91,17 @@ static void sync_timeline_free(struct sync_timeline *obj)
void sync_timeline_destroy(struct sync_timeline *obj)
{
- unsigned long flags;
- bool needs_freeing;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
obj->destroyed = true;
- needs_freeing = list_empty(&obj->child_list_head);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- if (needs_freeing)
- sync_timeline_free(obj);
- else
+ /*
+ * If this is not the last reference, signal any children
+ * that their parent is going away.
+ */
+
+ if (!kref_put(&obj->kref, sync_timeline_free))
sync_timeline_signal(obj);
}
+EXPORT_SYMBOL(sync_timeline_destroy);
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
@@ -110,7 +118,6 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
{
struct sync_timeline *obj = pt->parent;
unsigned long flags;
- bool needs_freeing;
spin_lock_irqsave(&obj->active_list_lock, flags);
if (!list_empty(&pt->active_list))
@@ -118,12 +125,10 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
spin_unlock_irqrestore(&obj->active_list_lock, flags);
spin_lock_irqsave(&obj->child_list_lock, flags);
- list_del(&pt->child_list);
- needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
+ if (!list_empty(&pt->child_list)) {
+ list_del_init(&pt->child_list);
+ }
spin_unlock_irqrestore(&obj->child_list_lock, flags);
-
- if (needs_freeing)
- sync_timeline_free(obj);
}
void sync_timeline_signal(struct sync_timeline *obj)
@@ -132,26 +137,33 @@ void sync_timeline_signal(struct sync_timeline *obj)
LIST_HEAD(signaled_pts);
struct list_head *pos, *n;
+ trace_sync_timeline(obj);
+
spin_lock_irqsave(&obj->active_list_lock, flags);
list_for_each_safe(pos, n, &obj->active_list_head) {
struct sync_pt *pt =
container_of(pos, struct sync_pt, active_list);
- if (_sync_pt_has_signaled(pt))
- list_move(pos, &signaled_pts);
+ if (_sync_pt_has_signaled(pt)) {
+ list_del_init(pos);
+ list_add(&pt->signaled_list, &signaled_pts);
+ kref_get(&pt->fence->kref);
+ }
}
spin_unlock_irqrestore(&obj->active_list_lock, flags);
list_for_each_safe(pos, n, &signaled_pts) {
struct sync_pt *pt =
- container_of(pos, struct sync_pt, active_list);
+ container_of(pos, struct sync_pt, signaled_list);
list_del_init(pos);
sync_fence_signal_pt(pt);
+ kref_put(&pt->fence->kref, sync_fence_free);
}
}
+EXPORT_SYMBOL(sync_timeline_signal);
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
@@ -165,10 +177,12 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
return NULL;
INIT_LIST_HEAD(&pt->active_list);
+ kref_get(&parent->kref);
sync_timeline_add_pt(parent, pt);
return pt;
}
+EXPORT_SYMBOL(sync_pt_create);
void sync_pt_free(struct sync_pt *pt)
{
@@ -177,8 +191,11 @@ void sync_pt_free(struct sync_pt *pt)
sync_timeline_remove_pt(pt);
+ kref_put(&pt->parent->kref, sync_timeline_free);
+
kfree(pt);
}
+EXPORT_SYMBOL(sync_pt_free);
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
@@ -247,6 +264,7 @@ static struct sync_fence *sync_fence_alloc(const char *name)
if (fence->file == NULL)
goto err;
+ kref_init(&fence->kref);
strlcpy(fence->name, name, sizeof(fence->name));
INIT_LIST_HEAD(&fence->pt_list_head);
@@ -282,8 +300,15 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
list_add(&pt->pt_list, &fence->pt_list_head);
sync_pt_activate(pt);
+ /*
+ * signal the fence in case pt had already signaled on its timeline
+ * before sync_pt_activate(pt) was called
+ */
+ sync_fence_signal_pt(pt);
+
return fence;
}
+EXPORT_SYMBOL(sync_fence_create);
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
@@ -305,6 +330,65 @@ static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
return 0;
}
+static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+ struct list_head *src_pos, *dst_pos, *n;
+
+ list_for_each(src_pos, &src->pt_list_head) {
+ struct sync_pt *src_pt =
+ container_of(src_pos, struct sync_pt, pt_list);
+ bool collapsed = false;
+
+ list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
+ struct sync_pt *dst_pt =
+ container_of(dst_pos, struct sync_pt, pt_list);
+ /* collapse two sync_pts on the same timeline
+ * to a single sync_pt that will signal at
+ * the later of the two
+ */
+ if (dst_pt->parent == src_pt->parent) {
+ if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
+ struct sync_pt *new_pt =
+ sync_pt_dup(src_pt);
+ if (new_pt == NULL)
+ return -ENOMEM;
+
+ new_pt->fence = dst;
+ list_replace(&dst_pt->pt_list,
+ &new_pt->pt_list);
+ sync_pt_activate(new_pt);
+ sync_pt_free(dst_pt);
+ }
+ collapsed = true;
+ break;
+ }
+ }
+
+ if (!collapsed) {
+ struct sync_pt *new_pt = sync_pt_dup(src_pt);
+
+ if (new_pt == NULL)
+ return -ENOMEM;
+
+ new_pt->fence = dst;
+ list_add(&new_pt->pt_list, &dst->pt_list_head);
+ sync_pt_activate(new_pt);
+ }
+ }
+
+ return 0;
+}
+
+static void sync_fence_detach_pts(struct sync_fence *fence)
+{
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &fence->pt_list_head) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+ sync_timeline_remove_pt(pt);
+ }
+}
+
static void sync_fence_free_pts(struct sync_fence *fence)
{
struct list_head *pos, *n;
@@ -331,16 +415,19 @@ err:
fput(file);
return NULL;
}
+EXPORT_SYMBOL(sync_fence_fdget);
void sync_fence_put(struct sync_fence *fence)
{
fput(fence->file);
}
+EXPORT_SYMBOL(sync_fence_put);
void sync_fence_install(struct sync_fence *fence, int fd)
{
fd_install(fd, fence->file);
}
+EXPORT_SYMBOL(sync_fence_install);
static int sync_fence_get_status(struct sync_fence *fence)
{
@@ -376,11 +463,17 @@ struct sync_fence *sync_fence_merge(const char *name,
if (err < 0)
goto err;
- err = sync_fence_copy_pts(fence, b);
+ err = sync_fence_merge_pts(fence, b);
if (err < 0)
goto err;
- fence->status = sync_fence_get_status(fence);
+ /*
+ * signal the fence in case one of its pts had already signaled
+ * before sync_pt_activate() was called on it
+ */
+ sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
+ struct sync_pt,
+ pt_list));
return fence;
err:
@@ -388,6 +481,7 @@ err:
kfree(fence);
return NULL;
}
+EXPORT_SYMBOL(sync_fence_merge);
static void sync_fence_signal_pt(struct sync_pt *pt)
{
@@ -421,33 +515,22 @@ static void sync_fence_signal_pt(struct sync_pt *pt)
container_of(pos, struct sync_fence_waiter,
waiter_list);
- waiter->callback(fence, waiter->callback_data);
list_del(pos);
- kfree(waiter);
+ waiter->callback(fence, waiter);
}
wake_up(&fence->wq);
}
}
int sync_fence_wait_async(struct sync_fence *fence,
- void (*callback)(struct sync_fence *, void *data),
- void *callback_data)
+ struct sync_fence_waiter *waiter)
{
- struct sync_fence_waiter *waiter;
unsigned long flags;
int err = 0;
- waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
- if (waiter == NULL)
- return -ENOMEM;
-
- waiter->callback = callback;
- waiter->callback_data = callback_data;
-
spin_lock_irqsave(&fence->waiter_list_lock, flags);
if (fence->status) {
- kfree(waiter);
err = fence->status;
goto out;
}
@@ -458,44 +541,118 @@ out:
return err;
}
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+int sync_fence_cancel_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter)
+{
+ struct list_head *pos;
+ struct list_head *n;
+ unsigned long flags;
+ int ret = -ENOENT;
+
+ spin_lock_irqsave(&fence->waiter_list_lock, flags);
+ /*
+ * Make sure waiter is still in waiter_list because it is possible for
+ * the waiter to be removed from the list while the callback is still
+ * pending.
+ */
+ list_for_each_safe(pos, n, &fence->waiter_list_head) {
+ struct sync_fence_waiter *list_waiter =
+ container_of(pos, struct sync_fence_waiter,
+ waiter_list);
+ if (list_waiter == waiter) {
+ list_del(pos);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
+
+static bool sync_fence_check(struct sync_fence *fence)
+{
+ /*
+ * Make sure that reads to fence->status are ordered with the
+ * wait queue event triggering
+ */
+ smp_rmb();
+ return fence->status != 0;
+}
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
- int err;
+ int err = 0;
+ struct sync_pt *pt;
+
+ trace_sync_wait(fence, 1);
+ list_for_each_entry(pt, &fence->pt_list_head, pt_list)
+ trace_sync_pt(pt);
- if (timeout) {
+ if (timeout > 0) {
timeout = msecs_to_jiffies(timeout);
err = wait_event_interruptible_timeout(fence->wq,
- fence->status != 0,
+ sync_fence_check(fence),
timeout);
- } else {
- err = wait_event_interruptible(fence->wq, fence->status != 0);
+ } else if (timeout < 0) {
+ err = wait_event_interruptible(fence->wq,
+ sync_fence_check(fence));
}
+ trace_sync_wait(fence, 0);
if (err < 0)
return err;
- if (fence->status < 0)
+ if (fence->status < 0) {
+ pr_info("fence error %d on [%p]\n", fence->status, fence);
+ sync_dump();
return fence->status;
+ }
- if (fence->status == 0)
+ if (fence->status == 0) {
+ pr_info("fence timeout on [%p] after %dms\n", fence,
+ jiffies_to_msecs(timeout));
+ sync_dump();
return -ETIME;
+ }
return 0;
}
+EXPORT_SYMBOL(sync_fence_wait);
+
+static void sync_fence_free(struct kref *kref)
+{
+ struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+
+ sync_fence_free_pts(fence);
+
+ kfree(fence);
+}
static int sync_fence_release(struct inode *inode, struct file *file)
{
struct sync_fence *fence = file->private_data;
unsigned long flags;
- sync_fence_free_pts(fence);
-
+ /*
+ * We need to remove all ways to access this fence before dropping
+ * our ref.
+ *
+ * start with its membership in the global fence list
+ */
spin_lock_irqsave(&sync_fence_list_lock, flags);
list_del(&fence->sync_fence_list);
spin_unlock_irqrestore(&sync_fence_list_lock, flags);
- kfree(fence);
+ /*
+ * remove its pts from their parents so that sync_timeline_signal()
+ * can't reference the fence.
+ */
+ sync_fence_detach_pts(fence);
+
+ kref_put(&fence->kref, sync_fence_free);
return 0;
}
@@ -506,6 +663,12 @@ static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
poll_wait(file, &fence->wq, wait);
+ /*
+ * Make sure that reads to fence->status are ordered with the
+ * wait queue event triggering
+ */
+ smp_rmb();
+
if (fence->status == 1)
return POLLIN;
else if (fence->status < 0)
@@ -516,7 +679,7 @@ static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
- __u32 value;
+ __s32 value;
if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
return -EFAULT;
@@ -531,8 +694,13 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
struct sync_fence *fence2, *fence3;
struct sync_merge_data data;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fd;
+ }
fence2 = sync_fence_fdget(data.fd2);
if (fence2 == NULL) {
@@ -689,7 +857,17 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
}
- if (pt->parent->ops->print_pt) {
+ if (pt->parent->ops->timeline_value_str &&
+ pt->parent->ops->pt_value_str) {
+ char value[64];
+ pt->parent->ops->pt_value_str(pt, value, sizeof(value));
+ seq_printf(s, ": %s", value);
+ if (fence) {
+ pt->parent->ops->timeline_value_str(pt->parent, value,
+ sizeof(value));
+ seq_printf(s, " / %s", value);
+ }
+ } else if (pt->parent->ops->print_pt) {
seq_printf(s, ": ");
pt->parent->ops->print_pt(s, pt);
}
@@ -704,7 +882,11 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
- if (obj->ops->print_obj) {
+ if (obj->ops->timeline_value_str) {
+ char value[64];
+ obj->ops->timeline_value_str(obj, value, sizeof(value));
+ seq_printf(s, ": %s", value);
+ } else if (obj->ops->print_obj) {
seq_printf(s, ": ");
obj->ops->print_obj(s, obj);
}
@@ -725,7 +907,8 @@ static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
struct list_head *pos;
unsigned long flags;
- seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
+ seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+ sync_status_str(fence->status));
list_for_each(pos, &fence->pt_list_head) {
struct sync_pt *pt =
@@ -739,8 +922,7 @@ static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
container_of(pos, struct sync_fence_waiter,
waiter_list);
- seq_printf(s, "waiter %pF %p\n", waiter->callback,
- waiter->callback_data);
+ seq_printf(s, "waiter %pF\n", waiter->callback);
}
spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
@@ -794,7 +976,34 @@ static __init int sync_debugfs_init(void)
debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
return 0;
}
-
late_initcall(sync_debugfs_init);
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+ struct seq_file s = {
+ .buf = sync_dump_buf,
+ .size = sizeof(sync_dump_buf) - 1,
+ };
+ int i;
+
+ sync_debugfs_show(&s, NULL);
+
+ for (i = 0; i < s.count; i += DUMP_CHUNK) {
+ if ((s.count - i) > DUMP_CHUNK) {
+ char c = s.buf[i + DUMP_CHUNK];
+ s.buf[i + DUMP_CHUNK] = 0;
+ pr_cont("%s", s.buf + i);
+ s.buf[i + DUMP_CHUNK] = c;
+ } else {
+ s.buf[s.count] = 0;
+ pr_cont("%s", s.buf + i);
+ }
+ }
+}
+#else
+static void sync_dump(void)
+{
+}
#endif
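The hunks above rework the async-wait API: sync_fence_wait_async() now takes
a caller-allocated struct sync_fence_waiter instead of a callback plus
void *data, the callback receives the waiter itself, and the new
sync_fence_cancel_async() withdraws a still-pending waiter under the
waiter-list lock. A minimal caller sketch; sync_fence_waiter_init() is
assumed to exist in the updated include/linux/sync.h, and my_job and
process_my_job are illustrative names:

#include <linux/kernel.h>
#include <linux/sync.h>

struct my_job {
	struct sync_fence_waiter waiter;
	/* ... driver state ... */
};

static void process_my_job(struct my_job *job);	/* illustrative */

static void my_fence_cb(struct sync_fence *fence,
			struct sync_fence_waiter *waiter)
{
	/* recover our state from the embedded waiter */
	struct my_job *job = container_of(waiter, struct my_job, waiter);

	process_my_job(job);
}

static int my_queue_after_fence(struct my_job *job, struct sync_fence *fence)
{
	int err;

	sync_fence_waiter_init(&job->waiter, my_fence_cb);

	err = sync_fence_wait_async(fence, &job->waiter);
	if (err < 0)		/* fence already in an error state */
		return err;
	if (err > 0)		/* already signaled: no callback will fire */
		process_my_job(job);
	return 0;		/* queued; my_fence_cb runs on signal */
}

/* On teardown, -ENOENT from sync_fence_cancel_async(fence, &job->waiter)
 * means the callback already ran (or is running) for this waiter. */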
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index acbd4cd20fc3..c82d9fee2848 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -20,6 +20,7 @@
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
@@ -40,9 +41,7 @@ struct cpufreq_interactive_cpuinfo {
struct timer_list cpu_timer;
int timer_idlecancel;
u64 time_in_idle;
- u64 idle_exit_time;
- u64 timer_run_time;
- int idling;
+ u64 time_in_idle_timestamp;
u64 target_set_time;
u64 target_set_time_in_idle;
struct cpufreq_policy *policy;
@@ -68,6 +67,10 @@ static unsigned int hispeed_freq;
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;
+/* Target load. Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned long target_load = DEFAULT_TARGET_LOAD;
+
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
*/
@@ -93,6 +96,11 @@ static unsigned long above_hispeed_delay_val;
static int boost_val;
+static bool governidle;
+module_param(governidle, bool, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(governidle,
+ "Set to 1 to wake up CPUs from idle to reduce speed (default 0)");
+
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -106,14 +114,23 @@ struct cpufreq_governor cpufreq_gov_interactive = {
.owner = THIS_MODULE,
};
+static void cpufreq_interactive_timer_resched(
+ struct cpufreq_interactive_cpuinfo *pcpu)
+{
+ mod_timer_pinned(&pcpu->cpu_timer,
+ jiffies + usecs_to_jiffies(timer_rate));
+ pcpu->time_in_idle =
+ get_cpu_idle_time_us(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp);
+}
+
static void cpufreq_interactive_timer(unsigned long data)
{
+ u64 now;
unsigned int delta_idle;
unsigned int delta_time;
int cpu_load;
int load_since_change;
- u64 time_in_idle;
- u64 idle_exit_time;
struct cpufreq_interactive_cpuinfo *pcpu =
&per_cpu(cpuinfo, data);
u64 now_idle;
@@ -126,26 +143,9 @@ static void cpufreq_interactive_timer(unsigned long data)
if (!pcpu->governor_enabled)
goto exit;
- /*
- * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
- * this lets idle exit know the current idle time sample has
- * been processed, and idle exit can generate a new sample and
- * re-arm the timer. This prevents a concurrent idle
- * exit on that CPU from writing a new set of info at the same time
- * the timer function runs (the timer function can't use that info
- * until more time passes).
- */
- time_in_idle = pcpu->time_in_idle;
- idle_exit_time = pcpu->idle_exit_time;
- now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
- smp_wmb();
-
- /* If we raced with cancelling a timer, skip. */
- if (!idle_exit_time)
- goto exit;
-
- delta_idle = (unsigned int)(now_idle - time_in_idle);
- delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);
+ now_idle = get_cpu_idle_time_us(data, &now);
+ delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+ delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
/*
* If timer ran less than 1ms after short-term sample started, retry.
@@ -159,8 +159,7 @@ static void cpufreq_interactive_timer(unsigned long data)
cpu_load = 100 * (delta_time - delta_idle) / delta_time;
delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
- delta_time = (unsigned int)(pcpu->timer_run_time -
- pcpu->target_set_time);
+ delta_time = (unsigned int)(now - pcpu->target_set_time);
if ((delta_time == 0) || (delta_idle > delta_time))
load_since_change = 0;
@@ -176,35 +175,25 @@ static void cpufreq_interactive_timer(unsigned long data)
if (load_since_change > cpu_load)
cpu_load = load_since_change;
- if (cpu_load >= go_hispeed_load || boost_val) {
- if (pcpu->target_freq < hispeed_freq &&
- hispeed_freq < pcpu->policy->max) {
- new_freq = hispeed_freq;
- } else {
- new_freq = pcpu->policy->max * cpu_load / 100;
-
- if (new_freq < hispeed_freq)
- new_freq = hispeed_freq;
-
- if (pcpu->target_freq == hispeed_freq &&
- new_freq > hispeed_freq &&
- pcpu->timer_run_time - pcpu->hispeed_validate_time
- < above_hispeed_delay_val) {
- trace_cpufreq_interactive_notyet(data, cpu_load,
- pcpu->target_freq,
- new_freq);
- goto rearm;
- }
- }
- } else {
- new_freq = hispeed_freq * cpu_load / 100;
+ if ((cpu_load >= go_hispeed_load || boost_val) &&
+ pcpu->target_freq < hispeed_freq)
+ new_freq = hispeed_freq;
+ else
+ new_freq = pcpu->policy->cur * cpu_load / target_load;
+
+ if (pcpu->target_freq >= hispeed_freq &&
+ new_freq > pcpu->target_freq &&
+ now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
}
- if (new_freq <= hispeed_freq)
- pcpu->hispeed_validate_time = pcpu->timer_run_time;
+ pcpu->hispeed_validate_time = now;
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
- new_freq, CPUFREQ_RELATION_H,
+ new_freq, CPUFREQ_RELATION_L,
&index)) {
pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
(int) data);
@@ -218,27 +207,28 @@ static void cpufreq_interactive_timer(unsigned long data)
* floor frequency for the minimum sample time since last validated.
*/
if (new_freq < pcpu->floor_freq) {
- if (pcpu->timer_run_time - pcpu->floor_validate_time
- < min_sample_time) {
- trace_cpufreq_interactive_notyet(data, cpu_load,
- pcpu->target_freq, new_freq);
+ if (now - pcpu->floor_validate_time < min_sample_time) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
goto rearm;
}
}
pcpu->floor_freq = new_freq;
- pcpu->floor_validate_time = pcpu->timer_run_time;
+ pcpu->floor_validate_time = now;
if (pcpu->target_freq == new_freq) {
- trace_cpufreq_interactive_already(data, cpu_load,
- pcpu->target_freq, new_freq);
+ trace_cpufreq_interactive_already(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
goto rearm_if_notmax;
}
trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
- new_freq);
+ pcpu->policy->cur, new_freq);
pcpu->target_set_time_in_idle = now_idle;
- pcpu->target_set_time = pcpu->timer_run_time;
+ pcpu->target_set_time = now;
pcpu->target_freq = new_freq;
spin_lock_irqsave(&speedchange_cpumask_lock, flags);
@@ -257,23 +247,14 @@ rearm_if_notmax:
rearm:
if (!timer_pending(&pcpu->cpu_timer)) {
/*
- * If already at min: if that CPU is idle, don't set timer.
- * Else cancel the timer if that CPU goes idle. We don't
- * need to re-evaluate speed until the next idle exit.
+ * If governing speed in idle and already at min, cancel the
+ * timer if that CPU goes idle. We don't need to re-evaluate
+ * speed until the next idle exit.
*/
- if (pcpu->target_freq == pcpu->policy->min) {
- smp_rmb();
-
- if (pcpu->idling)
- goto exit;
-
+ if (governidle && pcpu->target_freq == pcpu->policy->min)
pcpu->timer_idlecancel = 1;
- }
- pcpu->time_in_idle = get_cpu_idle_time_us(
- data, &pcpu->idle_exit_time);
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
+ cpufreq_interactive_timer_resched(pcpu);
}
exit:
@@ -289,12 +270,9 @@ static void cpufreq_interactive_idle_start(void)
if (!pcpu->governor_enabled)
return;
- pcpu->idling = 1;
- smp_wmb();
pending = timer_pending(&pcpu->cpu_timer);
if (pcpu->target_freq != pcpu->policy->min) {
-#ifdef CONFIG_SMP
/*
* Entering idle while not at lowest speed. On some
* platforms this can hold the other CPU(s) at that speed
@@ -304,14 +282,10 @@ static void cpufreq_interactive_idle_start(void)
* the CPUFreq driver.
*/
if (!pending) {
- pcpu->time_in_idle = get_cpu_idle_time_us(
- smp_processor_id(), &pcpu->idle_exit_time);
pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
+ cpufreq_interactive_timer_resched(pcpu);
}
-#endif
- } else {
+ } else if (governidle) {
/*
* If at min speed and entering idle after load has
* already been evaluated, and a timer has been set just in
@@ -320,12 +294,6 @@ static void cpufreq_interactive_idle_start(void)
*/
if (pending && pcpu->timer_idlecancel) {
del_timer(&pcpu->cpu_timer);
- /*
- * Ensure last timer run time is after current idle
- * sample start time, so next idle exit will always
- * start a new idle sampling period.
- */
- pcpu->idle_exit_time = 0;
pcpu->timer_idlecancel = 0;
}
}
@@ -340,31 +308,15 @@ static void cpufreq_interactive_idle_end(void)
if (!pcpu->governor_enabled)
return;
- pcpu->idling = 0;
- smp_wmb();
-
- /*
- * Arm the timer for 1-2 ticks later if not already, and if the timer
- * function has already processed the previous load sampling
- * interval. (If the timer is not pending but has not processed
- * the previous interval, it is probably racing with us on another
- * CPU. Let it compute load based on the previous sample and then
- * re-arm the timer for another interval when it's done, rather
- * than updating the interval start time to be "now", which doesn't
- * give the timer function enough time to make a decision on this
- * run.)
- */
- if (timer_pending(&pcpu->cpu_timer) == 0 &&
- pcpu->timer_run_time >= pcpu->idle_exit_time &&
- pcpu->governor_enabled) {
- pcpu->time_in_idle =
- get_cpu_idle_time_us(smp_processor_id(),
- &pcpu->idle_exit_time);
+ /* Arm the timer for 1-2 ticks later if not already. */
+ if (!timer_pending(&pcpu->cpu_timer)) {
pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
+ cpufreq_interactive_timer_resched(pcpu);
+ } else if (!governidle &&
+ time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+ del_timer(&pcpu->cpu_timer);
+ cpufreq_interactive_timer(smp_processor_id());
}
-
}
static int cpufreq_interactive_speedchange_task(void *data)
@@ -461,6 +413,30 @@ static void cpufreq_interactive_boost(void)
wake_up_process(speedchange_task);
}
+static ssize_t show_target_load(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", target_load);
+}
+
+static ssize_t store_target_load(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ target_load = val;
+ return count;
+}
+
+static struct global_attr target_load_attr =
+ __ATTR(target_load, S_IRUGO | S_IWUSR,
+ show_target_load, store_target_load);
+
static ssize_t show_hispeed_freq(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -622,6 +598,7 @@ static struct global_attr boostpulse =
__ATTR(boostpulse, 0200, NULL, store_boostpulse);
static struct attribute *interactive_attributes[] = {
+ &target_load_attr.attr,
&hispeed_freq_attr.attr,
&go_hispeed_load_attr.attr,
&above_hispeed_delay.attr,
@@ -672,6 +649,8 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
freq_table =
cpufreq_frequency_get_table(policy->cpu);
+ if (!hispeed_freq)
+ hispeed_freq = policy->max;
for_each_cpu(j, policy->cpus) {
pcpu = &per_cpu(cpuinfo, j);
@@ -688,11 +667,11 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
pcpu->target_set_time;
pcpu->governor_enabled = 1;
smp_wmb();
+ pcpu->cpu_timer.expires =
+ jiffies + usecs_to_jiffies(timer_rate);
+ add_timer_on(&pcpu->cpu_timer, j);
}
- if (!hispeed_freq)
- hispeed_freq = policy->max;
-
/*
* Do not register the idle hook and create sysfs
* entries if we have already done so.
@@ -714,14 +693,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
pcpu->governor_enabled = 0;
smp_wmb();
del_timer_sync(&pcpu->cpu_timer);
-
- /*
- * Reset idle exit time since we may cancel the timer
- * before it can run after the last idle exit time,
- * to avoid tripping the check in idle exit for a timer
- * that is trying to run.
- */
- pcpu->idle_exit_time = 0;
}
if (atomic_dec_return(&active_count) > 0)
@@ -759,7 +730,10 @@ static int __init cpufreq_interactive_init(void)
/* Initialize per-cpu timers */
for_each_possible_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
- init_timer(&pcpu->cpu_timer);
+ if (governidle)
+ init_timer(&pcpu->cpu_timer);
+ else
+ init_timer_deferrable(&pcpu->cpu_timer);
pcpu->cpu_timer.function = cpufreq_interactive_timer;
pcpu->cpu_timer.data = i;
}
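With the rework above, the governor scales off the current frequency instead
of the policy maximum: below go_hispeed_load it picks cur * load / target_load
and rounds up to the next table entry (CPUFREQ_RELATION_L); at or above
go_hispeed_load, while still below hispeed_freq, it jumps straight to
hispeed_freq. A worked sketch of that arithmetic (userspace C, illustrative
numbers only):

#include <stdio.h>

int main(void)
{
	unsigned int cur = 1000000;	/* current frequency in kHz (assumed) */
	unsigned int target_load = 90;	/* DEFAULT_TARGET_LOAD */
	unsigned int loads[] = { 30, 60, 90, 100 };

	for (int i = 0; i < 4; i++) {
		/* same arithmetic as the governor's non-hispeed path */
		unsigned int new_freq = cur * loads[i] / target_load;

		printf("load %3u%% -> target %7u kHz\n", loads[i], new_freq);
	}
	return 0;
}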
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
index 5b48b4e85e73..b5bfdb47fd09 100644
--- a/drivers/gpu/ion/Kconfig
+++ b/drivers/gpu/ion/Kconfig
@@ -1,6 +1,7 @@
menuconfig ION
tristate "Ion Memory Manager"
select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
help
Choose this option to enable the ION Memory Manager.
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 73fe3fa10706..d1ddebb74a3f 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+ ion_carveout_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
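The ion.c diff that follows splits ion_alloc()'s old combined flags word into
an explicit heap_mask (which heaps to try, by heap id bit) and a separate
flags argument (cache behaviour such as ION_FLAG_CACHED). A hedged caller
sketch of the new signature; ION_HEAP_SYSTEM_MASK, SZ_1M and the error
handling are illustrative assumptions, not taken from this patch:

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/sizes.h>

static int my_cached_alloc(struct ion_client *client)
{
	struct ion_handle *handle;

	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
			   ION_HEAP_SYSTEM_MASK,	/* heap_mask: heaps to try */
			   ION_FLAG_CACHED);		/* flags: cache behaviour  */
	if (IS_ERR_OR_NULL(handle))
		return -ENOMEM;

	/* dma access via ion_sg_table(), cpu access via ion_map_kernel() */

	ion_free(client, handle);
	return 0;
}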
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 1002ec0d52de..e675529b8482 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -20,6 +20,7 @@
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
+#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
@@ -30,33 +31,33 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
#include "ion_priv.h"
-#define DEBUG
/**
* struct ion_device - the metadata of the ion device node
* @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @lock: lock protecting the buffers & heaps trees
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
* @heaps: list of all the heaps in the system
* @user_clients: list of all the clients created from userspace
*/
struct ion_device {
struct miscdevice dev;
struct rb_root buffers;
- struct mutex lock;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
struct rb_root heaps;
long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
unsigned long arg);
- struct rb_root user_clients;
- struct rb_root kernel_clients;
+ struct rb_root clients;
struct dentry *debug_root;
};
/**
* struct ion_client - a process/hw block local address space
- * @ref: for reference counting the client
* @node: node in the tree of all clients
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
@@ -70,7 +71,6 @@ struct ion_device {
* as well as the handles themselves, and should be held while modifying either.
*/
struct ion_client {
- struct kref ref;
struct rb_node node;
struct ion_device *dev;
struct rb_root handles;
@@ -90,7 +90,6 @@ struct ion_client {
* @node: node in the client's handle rbtree
* @kmap_cnt: count of times this client has mapped to kernel
* @dmap_cnt: count of times this client has mapped for dma
- * @usermap_cnt: count of times this client has mapped for userspace
*
* Modifications to node, map_cnt or mapping should be protected by the
* lock in the client. Other fields are never changed after initialization.
@@ -101,10 +100,19 @@ struct ion_handle {
struct ion_buffer *buffer;
struct rb_node node;
unsigned int kmap_cnt;
- unsigned int dmap_cnt;
- unsigned int usermap_cnt;
};
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+ return ((buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
@@ -131,6 +139,8 @@ static void ion_buffer_add(struct ion_device *dev,
rb_insert_color(&buffer->node, &dev->buffers);
}
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
@@ -139,13 +149,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
unsigned long flags)
{
struct ion_buffer *buffer;
- int ret;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, ret;
buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
buffer->heap = heap;
+ buffer->flags = flags;
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
@@ -153,11 +166,57 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
kfree(buffer);
return ERR_PTR(ret);
}
+
buffer->dev = dev;
buffer->size = len;
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (IS_ERR_OR_NULL(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(PTR_ERR(table));
+ }
+ buffer->sg_table = table;
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
+ i) {
+ if (sg_dma_len(sg) == PAGE_SIZE)
+ continue;
+ pr_err("%s: cached mappings that will be faulted in "
+ "must have pagewise sg_lists\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ion_buffer_alloc_dirty(buffer);
+ if (ret)
+ goto err;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+ INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
+ /* this will set up dma addresses for the sglist -- it is not
+ technically correct as per the dma api -- a specific
+ device isn't really taking ownership here. However, in practice on
+ our systems the only dma_address space is physical addresses.
+ Additionally, we can't afford the overhead of invalidating every
+ allocation via dma_map_sg. The implicit contract here is that
+ memory coming from the heaps is ready for dma, i.e. if it has a
+ cached mapping that mapping has been invalidated */
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ sg_dma_address(sg) = sg_phys(sg);
+ mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
return buffer;
+
+err:
+ heap->ops->unmap_dma(heap, buffer);
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(ret);
}
static void ion_buffer_destroy(struct kref *kref)
@@ -165,10 +224,15 @@ static void ion_buffer_destroy(struct kref *kref)
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_device *dev = buffer->dev;
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
- mutex_lock(&dev->lock);
+ mutex_lock(&dev->buffer_lock);
rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->lock);
+ mutex_unlock(&dev->buffer_lock);
+ if (buffer->flags & ION_FLAG_CACHED)
+ kfree(buffer->dirty);
kfree(buffer);
}
@@ -182,6 +246,37 @@ static int ion_buffer_put(struct ion_buffer *buffer)
return kref_put(&buffer->ref, ion_buffer_destroy);
}
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by; it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
static struct ion_handle *ion_handle_create(struct ion_client *client,
struct ion_buffer *buffer)
{
@@ -191,25 +286,34 @@ static struct ion_handle *ion_handle_create(struct ion_client *client,
if (!handle)
return ERR_PTR(-ENOMEM);
kref_init(&handle->ref);
- rb_init_node(&handle->node);
+ RB_CLEAR_NODE(&handle->node);
handle->client = client;
ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
handle->buffer = buffer;
return handle;
}
+static void ion_handle_kmap_put(struct ion_handle *);
+
static void ion_handle_destroy(struct kref *kref)
{
struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- /* XXX Can a handle be destroyed while it's map count is non-zero?:
- if (handle->map_cnt) unmap
- */
- ion_buffer_put(handle->buffer);
- mutex_lock(&handle->client->lock);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &handle->client->handles);
- mutex_unlock(&handle->client->lock);
+ rb_erase(&handle->node, &client->handles);
+
+ ion_buffer_remove_from_handle(buffer);
+ ion_buffer_put(buffer);
+
kfree(handle);
}
@@ -282,57 +386,66 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int flags)
+ size_t align, unsigned int heap_mask,
+ unsigned int flags)
{
struct rb_node *n;
struct ion_handle *handle;
struct ion_device *dev = client->dev;
struct ion_buffer *buffer = NULL;
+ pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
+ align, heap_mask, flags);
/*
* traverse the list of heaps available in this system in priority
* order. If the heap type is supported by the client, and matches the
* request of the caller allocate from it. Repeat until allocate has
* succeeded or all heaps have been tried
*/
- mutex_lock(&dev->lock);
+ if (WARN_ON(!len))
+ return ERR_PTR(-EINVAL);
+
+ len = PAGE_ALIGN(len);
+
+ down_read(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
/* if the client doesn't support this heap type */
if (!((1 << heap->type) & client->heap_mask))
continue;
/* if the caller didn't specify this heap type */
- if (!((1 << heap->id) & flags))
+ if (!((1 << heap->id) & heap_mask))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
if (!IS_ERR_OR_NULL(buffer))
break;
}
- mutex_unlock(&dev->lock);
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
- if (IS_ERR_OR_NULL(buffer))
+ if (IS_ERR(buffer))
return ERR_PTR(PTR_ERR(buffer));
handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
-
/*
* ion_buffer_create will create a buffer with a ref_cnt of 1,
* and ion_handle_create will take a second reference, drop one here
*/
ion_buffer_put(buffer);
- mutex_lock(&client->lock);
- ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- return handle;
+ if (!IS_ERR(handle)) {
+ mutex_lock(&client->lock);
+ ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ }
+
-end:
- ion_buffer_put(buffer);
return handle;
}
+EXPORT_SYMBOL(ion_alloc);
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
@@ -342,46 +455,16 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
- mutex_unlock(&client->lock);
if (!valid_handle) {
- WARN("%s: invalid handle passed to free.\n", __func__);
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ mutex_unlock(&client->lock);
return;
}
ion_handle_put(handle);
+ mutex_unlock(&client->lock);
}
-
-static void ion_client_get(struct ion_client *client);
-static int ion_client_put(struct ion_client *client);
-
-static bool _ion_map(int *buffer_cnt, int *handle_cnt)
-{
- bool map;
-
- BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
-
- if (*buffer_cnt)
- map = false;
- else
- map = true;
- if (*handle_cnt == 0)
- (*buffer_cnt)++;
- (*handle_cnt)++;
- return map;
-}
-
-static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
-{
- BUG_ON(*handle_cnt == 0);
- (*handle_cnt)--;
- if (*handle_cnt != 0)
- return false;
- BUG_ON(*buffer_cnt == 0);
- (*buffer_cnt)--;
- if (*buffer_cnt == 0)
- return true;
- return false;
-}
+EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len)
@@ -407,175 +490,100 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
return ret;
}
+EXPORT_SYMBOL(ion_phys);
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer;
void *vaddr;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
}
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
+ return vaddr;
+}
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
}
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
- if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
- vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
- if (IS_ERR_OR_NULL(vaddr))
- _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
- buffer->vaddr = vaddr;
- } else {
- vaddr = buffer->vaddr;
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
}
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return vaddr;
}
-struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle)
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
- struct scatterlist *sglist;
+ void *vaddr;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
+ pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
+
buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- if (!handle->buffer->heap->ops->map_dma) {
+ if (!handle->buffer->heap->ops->map_kernel) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
- mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
- if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
- sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
- if (IS_ERR_OR_NULL(sglist))
- _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
- buffer->sglist = sglist;
- } else {
- sglist = buffer->sglist;
- }
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return sglist;
-}
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
mutex_lock(&buffer->lock);
- if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->vaddr = NULL;
- }
+ vaddr = ion_handle_kmap_get(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
+ return vaddr;
}
+EXPORT_SYMBOL(ion_map_kernel);
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
mutex_lock(&client->lock);
buffer = handle->buffer;
mutex_lock(&buffer->lock);
- if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
- buffer->sglist = NULL;
- }
+ ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
-
-
-struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle)
-{
- bool valid_handle;
-
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- mutex_unlock(&client->lock);
- if (!valid_handle) {
- WARN("%s: invalid handle passed to share.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- /* do not take an extra reference here, the burden is on the caller
- * to make sure the buffer doesn't go away while it's passing it
- * to another client -- ion_free should not be called on this handle
- * until the buffer has been imported into the other client
- */
- return handle->buffer;
-}
-
-struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle = NULL;
-
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR_OR_NULL(handle)) {
- ion_handle_get(handle);
- goto end;
- }
- handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
- ion_handle_add(client, handle);
-end:
- mutex_unlock(&client->lock);
- return handle;
-}
-
-static const struct file_operations ion_share_fops;
-
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
-{
- struct file *file = fget(fd);
- struct ion_handle *handle;
-
- if (!file) {
- pr_err("%s: imported fd not found in file table.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- if (file->f_op != &ion_share_fops) {
- pr_err("%s: imported file is not a shared ion file.\n",
- __func__);
- handle = ERR_PTR(-EINVAL);
- goto end;
- }
- handle = ion_import(client, file->private_data);
-end:
- fput(file);
- return handle;
-}
+EXPORT_SYMBOL(ion_unmap_kernel);
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
@@ -601,8 +609,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
for (i = 0; i < ION_NUM_HEAPS; i++) {
if (!names[i])
continue;
- seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i],
- atomic_read(&client->ref.refcount));
+ seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
}
return 0;
}
@@ -619,29 +626,6 @@ static const struct file_operations debug_client_fops = {
.release = single_release,
};
-static struct ion_client *ion_client_lookup(struct ion_device *dev,
- struct task_struct *task)
-{
- struct rb_node *n = dev->user_clients.rb_node;
- struct ion_client *client;
-
- mutex_lock(&dev->lock);
- while (n) {
- client = rb_entry(n, struct ion_client, node);
- if (task == client->task) {
- ion_client_get(client);
- mutex_unlock(&dev->lock);
- return client;
- } else if (task < client->task) {
- n = n->rb_left;
- } else if (task > client->task) {
- n = n->rb_right;
- }
- }
- mutex_unlock(&dev->lock);
- return NULL;
-}
-
struct ion_client *ion_client_create(struct ion_device *dev,
unsigned int heap_mask,
const char *name)
@@ -667,19 +651,10 @@ struct ion_client *ion_client_create(struct ion_device *dev,
}
task_unlock(current->group_leader);
- /* if this isn't a kernel thread, see if a client already
- exists */
- if (task) {
- client = ion_client_lookup(dev, task);
- if (!IS_ERR_OR_NULL(client)) {
- put_task_struct(current->group_leader);
- return client;
- }
- }
-
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client) {
- put_task_struct(current->group_leader);
+ if (task)
+ put_task_struct(current->group_leader);
return ERR_PTR(-ENOMEM);
}
@@ -690,49 +665,32 @@ struct ion_client *ion_client_create(struct ion_device *dev,
client->heap_mask = heap_mask;
client->task = task;
client->pid = pid;
- kref_init(&client->ref);
-
- mutex_lock(&dev->lock);
- if (task) {
- p = &dev->user_clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
-
- if (task < entry->task)
- p = &(*p)->rb_left;
- else if (task > entry->task)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->user_clients);
- } else {
- p = &dev->kernel_clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
-
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->kernel_clients);
+
+ down_write(&dev->lock);
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
}
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
snprintf(debug_name, 64, "%u", client->pid);
client->debug_root = debugfs_create_file(debug_name, 0664,
dev->debug_root, client,
&debug_client_fops);
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
return client;
}
-static void _ion_client_destroy(struct kref *kref)
+void ion_client_destroy(struct ion_client *client)
{
- struct ion_client *client = container_of(kref, struct ion_client, ref);
struct ion_device *dev = client->dev;
struct rb_node *n;
@@ -742,196 +700,350 @@ static void _ion_client_destroy(struct kref *kref)
node);
ion_handle_destroy(&handle->ref);
}
- mutex_lock(&dev->lock);
- if (client->task) {
- rb_erase(&client->node, &dev->user_clients);
+ down_write(&dev->lock);
+ if (client->task)
put_task_struct(client->task);
- } else {
- rb_erase(&client->node, &dev->kernel_clients);
- }
+ rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
kfree(client);
}
+EXPORT_SYMBOL(ion_client_destroy);
-static void ion_client_get(struct ion_client *client)
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
{
- kref_get(&client->ref);
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction direction);
-static int ion_client_put(struct ion_client *client)
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
{
- return kref_put(&client->ref, _ion_client_destroy);
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+ return buffer->sg_table;
}
-void ion_client_destroy(struct ion_client *client)
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
{
- ion_client_put(client);
}
-static int ion_share_release(struct inode *inode, struct file* file)
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer = file->private_data;
+ unsigned long pages = buffer->sg_table->nents;
+ unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* drop the reference to the buffer -- this prevents the
- buffer from going away because the client holding it exited
- while it was being passed */
- ion_buffer_put(buffer);
+ buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
+ if (!buffer->dirty)
+ return -ENOMEM;
return 0;
}
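
The bitmap above holds one dirty bit per scatterlist entry, rounded up to whole longs. A minimal sketch of the same sizing using the kernel's BITS_TO_LONGS() round-up helper; the wrapper function name here is illustrative only:

#include <linux/bitops.h>
#include <linux/slab.h>

/* one dirty bit per sg entry, rounded up to whole longs */
static unsigned long *alloc_dirty_bitmap(unsigned long nents)
{
	return kzalloc(BITS_TO_LONGS(nents) * sizeof(unsigned long),
		       GFP_KERNEL);
}
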
-static void ion_vma_open(struct vm_area_struct *vma)
+struct ion_vma_list {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction dir)
{
+ struct scatterlist *sg;
+ int i;
+ struct ion_vma_list *vma_list;
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_client *client;
+ pr_debug("%s: syncing for device %s\n", __func__,
+ dev ? dev_name(dev) : "null");
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* check that the client still exists and take a reference so
- it can't go away until this vma is closed */
- client = ion_client_lookup(buffer->dev, current->group_leader);
- if (IS_ERR_OR_NULL(client)) {
- vma->vm_private_data = NULL;
+ if (!ion_buffer_fault_user_mappings(buffer))
return;
+
+ mutex_lock(&buffer->lock);
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ if (!test_bit(i, buffer->dirty))
+ continue;
+ dma_sync_sg_for_device(dev, sg, 1, dir);
+ clear_bit(i, buffer->dirty);
}
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
+ list_for_each_entry(vma_list, &buffer->vmas, list) {
+ struct vm_area_struct *vma = vma_list->vma;
+
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+ NULL);
+ }
+ mutex_unlock(&buffer->lock);
}
-static void ion_vma_close(struct vm_area_struct *vma)
+int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_client *client;
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct scatterlist *sg;
+ int i;
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* this indicates the client is gone, nothing to do here */
- if (!handle)
- return;
- client = handle->client;
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
- ion_handle_put(handle);
- ion_client_put(client);
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
+ mutex_lock(&buffer->lock);
+ set_bit(vmf->pgoff, buffer->dirty);
+
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ if (i != vmf->pgoff)
+ continue;
+ dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
+ vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ sg_page(sg));
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+ return VM_FAULT_NOPAGE;
}
-static struct vm_operations_struct ion_vm_ops = {
- .open = ion_vma_open,
- .close = ion_vma_close,
-};
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list;
+
+ vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ if (!vma_list)
+ return;
+ vma_list->vma = vma;
+ mutex_lock(&buffer->lock);
+ list_add(&vma_list->list, &buffer->vmas);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s: adding %p\n", __func__, vma);
+}
-static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
+static void ion_vm_close(struct vm_area_struct *vma)
{
- struct ion_buffer *buffer = file->private_data;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct ion_client *client;
- struct ion_handle *handle;
- int ret;
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list, *tmp;
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* make sure the client still exists, it's possible for the client to
- have gone away but the map/share fd still to be around, take
- a reference to it so it can't go away while this mapping exists */
- client = ion_client_lookup(buffer->dev, current->group_leader);
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: trying to mmap an ion handle in a process with no "
- "ion client\n", __func__);
- return -EINVAL;
+ pr_debug("%s\n", __func__);
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+ if (vma_list->vma != vma)
+ continue;
+ list_del(&vma_list->list);
+ kfree(vma_list);
+ pr_debug("%s: deleting %p\n", __func__, vma);
+ break;
}
+ mutex_unlock(&buffer->lock);
+}
- if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
- buffer->size)) {
- pr_err("%s: trying to map larger area than handle has available"
- "\n", __func__);
- ret = -EINVAL;
- goto err;
- }
+struct vm_operations_struct ion_vma_ops = {
+ .open = ion_vm_open,
+ .close = ion_vm_close,
+ .fault = ion_vm_fault,
+};
- /* find the handle and take a reference to it */
- handle = ion_import(client, buffer);
- if (IS_ERR_OR_NULL(handle)) {
- ret = -EINVAL;
- goto err;
- }
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ int ret = 0;
- if (!handle->buffer->heap->ops->map_user) {
+ if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping "
"to userspace\n", __func__);
- ret = -EINVAL;
- goto err1;
+ return -EINVAL;
+ }
+
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_private_data = buffer;
+ vma->vm_ops = &ion_vma_ops;
+ ion_vm_open(vma);
+ return 0;
}
+ if (!(buffer->flags & ION_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
mutex_lock(&buffer->lock);
/* now map it to userspace */
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
mutex_unlock(&buffer->lock);
- if (ret) {
+
+ if (ret)
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
- goto err1;
+
+ return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
}
- vma->vm_ops = &ion_vm_ops;
- /* move the handle into the vm_private_data so we can access it from
- vma_open/close */
- vma->vm_private_data = handle;
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ if (!vaddr)
+ return -ENOMEM;
return 0;
+}
-err1:
- /* drop the reference to the handle */
- ion_handle_put(handle);
-err:
- /* drop the reference to the client */
- ion_client_put(client);
- return ret;
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
}
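
Together, begin_cpu_access/end_cpu_access bracket CPU access and manage the kernel mapping's refcount. A hedged consumer-side sketch using the standard 3.7 dma-buf CPU-access calls (not part of this patch; the dmabuf and length are assumed to come from the caller):

#include <linux/dma-buf.h>

/* illustrative only: read the start of an exported ion buffer */
static int example_peek(struct dma_buf *dmabuf, size_t len)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	if (ret)
		return ret;
	vaddr = dma_buf_kmap(dmabuf, 0);	/* dispatches to ion_dma_buf_kmap() */
	/* ... inspect vaddr ... */
	dma_buf_kunmap(dmabuf, 0, vaddr);
	dma_buf_end_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	return 0;
}
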
-static const struct file_operations ion_share_fops = {
- .owner = THIS_MODULE,
- .release = ion_share_release,
- .mmap = ion_share_mmap,
+struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
};
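
This ops table is what lets any other driver consume an ion buffer through the generic dma-buf interface. A hypothetical importer-side sketch (nothing here is in the patch; the function name is made up):

#include <linux/dma-buf.h>
#include <linux/err.h>

static struct sg_table *map_imported_buffer(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	/* dispatches to ion_map_dma_buf() above */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	return sgt;	/* caller unmaps, detaches and puts when done */
}
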
-static int ion_ioctl_share(struct file *parent, struct ion_client *client,
- struct ion_handle *handle)
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
- int fd = get_unused_fd();
- struct file *file;
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+ int fd;
- if (fd < 0)
- return -ENFILE;
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ return -EINVAL;
+ }
- file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
- handle->buffer, O_RDWR);
- if (IS_ERR_OR_NULL(file))
- goto err;
- ion_buffer_get(handle->buffer);
- fd_install(fd, file);
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return PTR_ERR(dmabuf);
+ }
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(dmabuf);
return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
-err:
- put_unused_fd(fd);
- return -ENFILE;
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+ return ERR_PTR(PTR_ERR(dmabuf));
+ /* if this memory came from ion */
+
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR_OR_NULL(handle)) {
+ ion_handle_get(handle);
+ goto end;
+ }
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR_OR_NULL(handle))
+ goto end;
+ ion_handle_add(client, handle);
+end:
+ mutex_unlock(&client->lock);
+ dma_buf_put(dmabuf);
+ return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
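
For in-kernel users the round trip is symmetric: share installs a dma-buf fd, import resolves it back to a handle, reusing an existing handle for that client if one exists. A minimal sketch assuming two pre-existing clients; the names are illustrative:

static int share_between_clients(struct ion_client *client_a,
				 struct ion_handle *handle_a,
				 struct ion_client *client_b)
{
	struct ion_handle *imported;
	int fd;

	fd = ion_share_dma_buf(client_a, handle_a);	/* export + fd install */
	if (fd < 0)
		return fd;

	imported = ion_import_dma_buf(client_b, fd);	/* same buffer, new ref */
	if (IS_ERR(imported))
		return PTR_ERR(imported);

	/* client_b may now use 'imported' like a locally allocated handle */
	return 0;
}
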
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ dma_buf_put(dmabuf);
+ return 0;
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -946,9 +1058,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
data.handle = ion_alloc(client, data.len, data.align,
- data.flags);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ data.heap_mask, data.flags);
+
+ if (IS_ERR(data.handle))
+ return PTR_ERR(data.handle);
+
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ ion_free(client, data.handle);
return -EFAULT;
+ }
break;
}
case ION_IOC_FREE:
@@ -967,39 +1085,45 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ion_free(client, data.handle);
break;
}
- case ION_IOC_MAP:
case ION_IOC_SHARE:
{
struct ion_fd_data data;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, data.handle)) {
- pr_err("%s: invalid handle passed to share ioctl.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- data.fd = ion_ioctl_share(filp, client, data.handle);
- mutex_unlock(&client->lock);
+ data.fd = ion_share_dma_buf(client, data.handle);
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
+ if (data.fd < 0)
+ return data.fd;
break;
}
case ION_IOC_IMPORT:
{
struct ion_fd_data data;
+ int ret = 0;
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_fd_data)))
return -EFAULT;
-
- data.handle = ion_import_fd(client, data.fd);
- if (IS_ERR(data.handle))
+ data.handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(data.handle)) {
+ ret = PTR_ERR(data.handle);
data.handle = NULL;
+ }
if (copy_to_user((void __user *)arg, &data,
sizeof(struct ion_fd_data)))
return -EFAULT;
+ if (ret < 0)
+ return ret;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ struct ion_fd_data data;
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_fd_data)))
+ return -EFAULT;
+ ion_sync_for_device(client, data.fd);
break;
}
case ION_IOC_CUSTOM:
@@ -1025,7 +1149,7 @@ static int ion_release(struct inode *inode, struct file *file)
struct ion_client *client = file->private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_put(client);
+ ion_client_destroy(client);
return 0;
}
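
From userspace the same path is driven through the ioctls above. A hypothetical sketch, assuming the conventional /dev/ion node and an exported <linux/ion.h>; note that the dma-buf fd returned by ION_IOC_SHARE keeps the buffer alive even after the client fd is closed, because the buffer reference now lives in the dma-buf itself:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>

/* returns a shareable dma-buf fd, or -1 on error */
int ion_alloc_fd(size_t len, unsigned int heap_mask, unsigned int flags)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 4096,
		.heap_mask = heap_mask,
		.flags = flags,
	};
	struct ion_fd_data share;
	int ion_fd = open("/dev/ion", O_RDONLY);

	if (ion_fd < 0)
		return -1;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto fail;
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		goto fail;
	close(ion_fd);	/* the dma-buf fd in share.fd stays valid */
	return share.fd;
fail:
	close(ion_fd);
	return -1;
}
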
@@ -1074,30 +1198,55 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
- for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
+ seq_printf(s, "----------------------------------------------------\n");
+
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
- char task_comm[TASK_COMM_LEN];
size_t size = ion_debug_heap_total(client, heap->type);
if (!size)
continue;
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid,
- size);
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16.s %16u %16u\n", task_comm,
+ client->pid, size);
+ } else {
+ seq_printf(s, "%16.s %16u %16u\n", client->name,
+ client->pid, size);
+ }
}
-
- for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "orphaned allocations (info is from last known client):"
+ "\n");
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
node);
- size_t size = ion_debug_heap_total(client, heap->type);
- if (!size)
+ if (buffer->heap->type != heap->type)
continue;
- seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid,
- size);
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+ seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
+				   buffer->pid, buffer->size, buffer->kmap_cnt,
+				   atomic_read(&buffer->ref.refcount));
+ total_orphaned_size += buffer->size;
+ }
}
+ mutex_unlock(&dev->buffer_lock);
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s %16u\n", "total orphaned",
+ total_orphaned_size);
+ seq_printf(s, "%16.s %16u\n", "total ", total_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
+ if (heap->debug_show)
+ heap->debug_show(heap, s, unused);
+
return 0;
}
@@ -1119,8 +1268,13 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
struct rb_node *parent = NULL;
struct ion_heap *entry;
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
heap->dev = dev;
- mutex_lock(&dev->lock);
+ down_write(&dev->lock);
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_heap, node);
@@ -1141,7 +1295,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
&debug_heap_fops);
end:
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
}
struct ion_device *ion_device_create(long (*custom_ioctl)
@@ -1172,10 +1326,10 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
- mutex_init(&idev->lock);
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
idev->heaps = RB_ROOT;
- idev->user_clients = RB_ROOT;
- idev->kernel_clients = RB_ROOT;
+ idev->clients = RB_ROOT;
return idev;
}
@@ -1185,3 +1339,19 @@ void ion_device_destroy(struct ion_device *dev)
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i, ret;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+ ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %x@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+}
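
ion_reserve() is meant to be called from early board code, before memblock allocations are finalized. A hypothetical board-file sketch; the heap id, base and size are made-up values:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ion.h>

static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id   = 1,
		.name = "carveout",
		.base = 0x90000000,
		.size = 8 * 1024 * 1024,	/* 8 MiB */
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

/* called from the machine's ->reserve() hook */
static void __init example_reserve_ion(void)
{
	ion_reserve(&example_ion_pdata);
}
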
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index b4fcb3c92479..5b6255ba2da5 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -84,23 +84,41 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}
-struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- return ERR_PTR(-EINVAL);
+ struct sg_table *table;
+ int ret;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ERR_PTR(ret);
+ }
+ sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
+ 0);
+ return table;
}
void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- return;
+ sg_free_table(buffer->sg_table);
}
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ int mtype = MT_MEMORY_NONCACHED;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ mtype = MT_MEMORY;
+
return __arm_ioremap(buffer->priv_phys, buffer->size,
- MT_MEMORY_NONCACHED);
+ mtype);
}
void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
@@ -116,7 +134,7 @@ int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
{
return remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
- buffer->size,
+ vma->vm_end - vma->vm_start,
pgprot_noncached(vma->vm_page_prot));
}
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
new file mode 100644
index 000000000000..cd57b30e875f
--- /dev/null
+++ b/drivers/gpu/ion/ion_page_pool.c
@@ -0,0 +1,281 @@
+/*
+ * drivers/gpu/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/shrinker.h>
+#include "ion_priv.h"
+
+/* #define DEBUG_PAGE_POOL_SHRINKER */
+
+static struct plist_head pools = PLIST_HEAD_INIT(pools);
+static struct shrinker shrinker;
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+ if (!page)
+ return NULL;
+	/* this is only being used to flush the page for dma;
+	   this api is not really suitable for calling from a driver,
+	   but no better way to flush a page for dma exists at this time */
+ __dma_page_cpu_to_dev(page, 0, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
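
The alloc/free pair above gives heaps a recycling fast path: a "freed" page goes back onto the pool's list instead of to the page allocator, and only the shrinker truly releases it. A minimal usage sketch, assuming a heap that caches order-0 pages; names are illustrative and error handling is trimmed:

static struct ion_page_pool *example_pool;

static int example_pool_init(void)
{
	example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
	/* pool hit if available, otherwise a fresh (flushed) allocation */
	return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page)
{
	/* recycled into the pool rather than freed */
	ion_page_pool_free(example_pool, page);
}
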
+
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+static int debug_drop_pools_set(void *data, u64 val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ shrinker.shrink(&shrinker, &sc);
+ return 0;
+}
+
+static int debug_drop_pools_get(void *data, u64 *val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_drop_pools_fops, debug_drop_pools_get,
+ debug_drop_pools_set, "%llu\n");
+
+static int debug_grow_pools_set(void *data, u64 val)
+{
+ struct ion_page_pool *pool;
+ struct page *page;
+
+ plist_for_each_entry(pool, &pools, list) {
+ if (val != pool->list.prio)
+ continue;
+ page = ion_page_pool_alloc_pages(pool);
+ if (page)
+ ion_page_pool_add(pool, page);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_grow_pools_fops, debug_drop_pools_get,
+ debug_grow_pools_set, "%llu\n");
+#endif
+
+static int ion_page_pool_total(bool high)
+{
+ struct ion_page_pool *pool;
+ int total = 0;
+
+ plist_for_each_entry(pool, &pools, list) {
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
+ pool->low_count * (1 << pool->order);
+ }
+ return total;
+}
+
+static int ion_page_pool_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_page_pool *pool;
+ int nr_freed = 0;
+ int i;
+	/* high stays false unless the caller can take highmem pages */
+	bool high = !!(sc->gfp_mask & __GFP_HIGHMEM);
+	int nr_to_scan = sc->nr_to_scan;
+
+ if (nr_to_scan == 0)
+ return ion_page_pool_total(high);
+
+ plist_for_each_entry(pool, &pools, list) {
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ nr_freed += (1 << pool->order);
+ }
+ nr_to_scan -= i;
+ }
+
+ return ion_page_pool_total(high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+ plist_add(&pool->list, &pools);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ plist_del(&pool->list, &pools);
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ shrinker.shrink = ion_page_pool_shrink;
+ shrinker.seeks = DEFAULT_SEEKS;
+ shrinker.batch = 0;
+ register_shrinker(&shrinker);
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+ debugfs_create_file("ion_pools_shrink", 0644, NULL, NULL,
+ &debug_drop_pools_fops);
+ debugfs_create_file("ion_pools_grow", 0644, NULL, NULL,
+ &debug_grow_pools_fops);
+#endif
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+ unregister_shrinker(&shrinker);
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 3323954c03a0..21c196305bff 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -17,23 +17,14 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/ion.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
-#include <linux/ion.h>
-
-struct ion_mapping;
-
-struct ion_dma_mapping {
- struct kref ref;
- struct scatterlist *sglist;
-};
-
-struct ion_kernel_mapping {
- struct kref ref;
- void *vaddr;
-};
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
@@ -53,7 +44,16 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr:		the kernel mapping if kmap_cnt is not zero
* @dmap_cnt: number of times the buffer is mapped for dma
- * @sglist: the scatterlist for the buffer is dmap_cnt is not zero
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @dirty: bitmask representing which pages of this buffer have
+ * been dirtied by the cpu and need cache maintenance
+ * before dma
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
*/
struct ion_buffer {
struct kref ref;
@@ -70,7 +70,13 @@ struct ion_buffer {
int kmap_cnt;
void *vaddr;
int dmap_cnt;
- struct scatterlist *sglist;
+ struct sg_table *sg_table;
+ unsigned long *dirty;
+ struct list_head vmas;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
};
/**
@@ -92,7 +98,7 @@ struct ion_heap_ops {
void (*free) (struct ion_buffer *buffer);
int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len);
- struct scatterlist *(*map_dma) (struct ion_heap *heap,
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
struct ion_buffer *buffer);
void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
@@ -111,6 +117,8 @@ struct ion_heap_ops {
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
@@ -124,9 +132,27 @@ struct ion_heap {
struct ion_heap_ops *ops;
int id;
const char *name;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};
/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in; this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
* ion_device_create - allocates and returns an ion device
* @custom_ioctl: arch specific ioctl function if applicable
*
@@ -181,4 +207,51 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
*/
#define ION_CARVEOUT_ALLOCATE_FAIL -1
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap.  Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @shrinker: a shrinker for the items
+ * @mutex: lock protecting this struct and especially the count
+ * item list
+ * @alloc:		function to be used to allocate pages when the pool
+ *			is empty
+ * @free:		function to be used to free pages back to the system
+ *			when the shrinker fires
+ * @gfp_mask: gfp_mask to use from alloc
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ void *(*alloc)(struct ion_page_pool *pool);
+ void (*free)(struct ion_page_pool *pool, struct page *page);
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index c046cf1a3219..2a85df9ef89a 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -14,84 +14,312 @@
*
*/
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
-static int ion_system_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NO_KSWAPD) & ~__GFP_WAIT;
+static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
{
- buffer->priv_virt = vmalloc_user(size);
- if (!buffer->priv_virt)
- return -ENOMEM;
- return 0;
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
}
-void ion_system_heap_free(struct ion_buffer *buffer)
+static unsigned int order_to_size(int order)
{
- vfree(buffer->priv_virt);
+ return PAGE_SIZE << order;
}
-struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
+
+struct page_info {
+ struct page *page;
+ unsigned int order;
+ struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
{
- struct scatterlist *sglist;
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags, order);
+ if (!page)
+			return NULL;
+ __dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+		return NULL;
+
+ if (split_pages)
+ split_page(page, order);
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
int i;
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- void *vaddr = buffer->priv_virt;
- sglist = vmalloc(npages * sizeof(struct scatterlist));
- if (!sglist)
- return ERR_PTR(-ENOMEM);
- memset(sglist, 0, npages * sizeof(struct scatterlist));
- sg_init_table(sglist, npages);
- for (i = 0; i < npages; i++) {
- page = vmalloc_to_page(vaddr);
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ /* zero the pages before returning them to the pool for
+ security. This uses vmap as we want to set the pgprot so
+		   the writes occur to noncached mappings, as the pool's
+ purpose is to keep the pages out of the cache */
+ for (i = 0; i < (1 << order); i++) {
+ struct page *sub_page = page + i;
+ void *addr = vmap(&sub_page, 1, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ memset(addr, 0, PAGE_SIZE);
+ vunmap(addr);
+ }
+ ion_page_pool_free(pool, page);
+ } else if (split_pages) {
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ struct page_info *info;
+ int i;
+
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
if (!page)
- goto end;
- sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
- vaddr += PAGE_SIZE;
+ continue;
+
+		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+		if (!info) {
+			free_buffer_page(heap, buffer, page, orders[i]);
+			return NULL;
+		}
+		info->page = page;
+ info->order = orders[i];
+ return info;
}
- /* XXX do cache maintenance for dma? */
- return sglist;
-end:
- vfree(sglist);
return NULL;
}
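
With 4 KiB pages, the orders[] table above yields chunk sizes of 1 MiB, 64 KiB and 4 KiB, and max_order only ratchets downward, so the resulting list is sorted largest-first. A standalone arithmetic sketch of the greedy split (for example, a 1,191,936-byte request becomes one order-8, two order-4 and three order-0 chunks):

#include <stdio.h>

int main(void)
{
	const unsigned int orders[] = {8, 4, 0};
	long remaining = 1191936;	/* page-aligned request, in bytes */
	unsigned int max_order = orders[0];
	int i;

	while (remaining > 0) {
		for (i = 0; i < 3; i++) {
			long chunk = 4096L << orders[i];

			if (remaining < chunk || max_order < orders[i])
				continue;
			printf("order %u chunk (%ld bytes)\n",
			       orders[i], chunk);
			remaining -= chunk;
			max_order = orders[i];
			break;
		}
	}
	return 0;
}
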
+static int ion_system_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret;
+ struct list_head pages;
+ struct page_info *info, *tmp_info;
+ int i = 0;
+ long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+
+ INIT_LIST_HEAD(&pages);
+ while (size_remaining > 0) {
+ info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
+ if (!info)
+ goto err;
+ list_add_tail(&info->list, &pages);
+ size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
+ i++;
+ }
+
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ goto err;
+
+ if (split_pages)
+ ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
+ GFP_KERNEL);
+ else
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+
+ if (ret)
+ goto err1;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ struct page *page = info->page;
+ if (split_pages) {
+ for (i = 0; i < (1 << info->order); i++) {
+ sg_set_page(sg, page + i, PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ }
+ } else {
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
+ 0);
+ sg = sg_next(sg);
+ }
+ list_del(&info->list);
+ kfree(info);
+ }
+
+ buffer->priv_virt = table;
+ return 0;
+err1:
+ kfree(table);
+err:
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
+ return -ENOMEM;
+}
+
+void ion_system_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ LIST_HEAD(pages);
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg), get_order(sg_dma_len(sg)));
+ sg_free_table(table);
+ kfree(table);
+}
+
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
void ion_system_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- /* XXX undo cache maintenance for dma? */
- if (buffer->sglist)
- vfree(buffer->sglist);
+ return;
}
void *ion_system_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- return buffer->priv_virt;
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->priv_virt;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+		return NULL;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++) {
+ *(tmp++) = page++;
+ }
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ return vaddr;
}
void ion_system_heap_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ vunmap(buffer->vaddr);
}
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
- return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
+ struct sg_table *table = buffer->priv_virt;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg_dma_len(sg);
+
+ if (offset >= sg_dma_len(sg)) {
+ offset -= sg_dma_len(sg);
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg_dma_len(sg) - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
}
-static struct ion_heap_ops vmalloc_ops = {
+static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
.map_dma = ion_system_heap_map_dma,
@@ -101,21 +329,74 @@ static struct ion_heap_ops vmalloc_ops = {
.map_user = ion_system_heap_map_user,
};
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+ return 0;
+}
+
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
- struct ion_heap *heap;
+ struct ion_system_heap *heap;
+ int i;
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
- heap->ops = &vmalloc_ops;
- heap->type = ION_HEAP_TYPE_SYSTEM;
- return heap;
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+ heap->heap.debug_show = ion_system_heap_debug_show;
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
}
void ion_system_heap_destroy(struct ion_heap *heap)
{
- kfree(heap);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
}
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
@@ -144,17 +425,30 @@ static int ion_system_contig_heap_phys(struct ion_heap *heap,
return 0;
}
-struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
+ struct sg_table *table;
+ int ret;
- sglist = vmalloc(sizeof(struct scatterlist));
- if (!sglist)
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
return ERR_PTR(-ENOMEM);
- sg_init_table(sglist, 1);
- sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
- return sglist;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ERR_PTR(ret);
+ }
+ sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
+ 0);
+ return table;
+}
+
+void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
}
int ion_system_contig_heap_map_user(struct ion_heap *heap,
@@ -173,7 +467,7 @@ static struct ion_heap_ops kmalloc_ops = {
.free = ion_system_contig_heap_free,
.phys = ion_system_contig_heap_phys,
.map_dma = ion_system_contig_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_system_heap_map_kernel,
.unmap_kernel = ion_system_heap_unmap_kernel,
.map_user = ion_system_contig_heap_map_user,
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 49a893972318..47e086858cd7 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -203,6 +203,16 @@ config BATTERY_MAX17042
with MAX17042. This driver also supports max17047/50 chips which are
improved version of max17042.
+config BATTERY_ANDROID
+ tristate "Battery driver for Android"
+ help
+ Say Y to enable generic support for battery charging according
+ to common Android policies.
+ This driver adds periodic battery level and health monitoring,
+ kernel log reporting and other debugging features, common board
+ battery file glue logic for battery/case temperature sensors,
+ etc.
+
config BATTERY_Z2
tristate "Z2 battery driver"
depends on I2C && MACH_ZIPIT2
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b949cf85590c..cd30257ed6ff 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -49,3 +49,4 @@ obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
+obj-$(CONFIG_BATTERY_ANDROID) += android_battery.o
diff --git a/drivers/power/android_battery.c b/drivers/power/android_battery.c
new file mode 100644
index 000000000000..e0cefafbfb96
--- /dev/null
+++ b/drivers/power/android_battery.c
@@ -0,0 +1,767 @@
+/*
+ * android_battery.c
+ * Android Battery Driver
+ *
+ * Copyright (C) 2012 Google, Inc.
+ * Copyright (C) 2012 Samsung Electronics
+ *
+ * Based on work by himihee.seo@samsung.com, ms925.kim@samsung.com, and
+ * joshua.chang@samsung.com.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+#include <linux/workqueue.h>
+#include <linux/alarmtimer.h>
+#include <linux/timer.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/platform_data/android_battery.h>
+
+#define FAST_POLL (1 * 60)
+#define SLOW_POLL (10 * 60)
+
+struct android_bat_data {
+ struct android_bat_platform_data *pdata;
+ struct android_bat_callbacks callbacks;
+
+ struct device *dev;
+
+ struct power_supply psy_bat;
+ struct power_supply psy_usb;
+ struct power_supply psy_ac;
+
+ struct wake_lock monitor_wake_lock;
+ struct wake_lock charger_wake_lock;
+
+ int charge_source;
+
+ int batt_temp;
+ int batt_current;
+ unsigned int batt_health;
+ unsigned int batt_vcell;
+ unsigned int batt_soc;
+ unsigned int charging_status;
+ bool recharging;
+ unsigned long charging_start_time;
+
+ struct workqueue_struct *monitor_wqueue;
+ struct work_struct monitor_work;
+ struct work_struct charger_work;
+
+ struct alarm monitor_alarm;
+ ktime_t last_poll;
+
+ struct dentry *debugfs_entry;
+};
+
+static char *supply_list[] = {
+ "android-battery",
+};
+
+static enum power_supply_property android_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static enum power_supply_property android_power_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static DEFINE_MUTEX(android_bat_state_lock);
+
+static void android_bat_update_data(struct android_bat_data *battery);
+static int android_bat_enable_charging(struct android_bat_data *battery,
+ bool enable);
+
+static char *charge_source_str(int charge_source)
+{
+ switch (charge_source) {
+ case CHARGE_SOURCE_NONE:
+ return "none";
+ case CHARGE_SOURCE_AC:
+ return "ac";
+ case CHARGE_SOURCE_USB:
+ return "usb";
+ default:
+ break;
+ }
+
+ return "?";
+}
+
+static int android_bat_get_property(struct power_supply *ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct android_bat_data *battery =
+ container_of(ps, struct android_bat_data, psy_bat);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = battery->charging_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = battery->batt_health;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = battery->batt_temp;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ android_bat_update_data(battery);
+ val->intval = battery->batt_vcell;
+ if (val->intval == -1)
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = battery->batt_soc;
+ if (val->intval == -1)
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ android_bat_update_data(battery);
+ val->intval = battery->batt_current;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int android_usb_get_property(struct power_supply *ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct android_bat_data *battery = container_of(ps,
+ struct android_bat_data, psy_usb);
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ val->intval = (battery->charge_source == CHARGE_SOURCE_USB);
+
+ return 0;
+}
+
+static int android_ac_get_property(struct power_supply *ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct android_bat_data *battery = container_of(ps,
+ struct android_bat_data, psy_ac);
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ val->intval = (battery->charge_source == CHARGE_SOURCE_AC);
+
+ return 0;
+}
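
These get_property() callbacks are wired up to the power_supply core at probe time (the actual probe function appears later in this file). A hypothetical sketch of that wiring, using only fields already declared above; psy_usb would be set up the same way as psy_ac:

/* illustrative only: how the supplies above would be registered */
static int example_register_supplies(struct android_bat_data *battery)
{
	int ret;

	battery->psy_bat.name		= "android-battery";
	battery->psy_bat.type		= POWER_SUPPLY_TYPE_BATTERY;
	battery->psy_bat.properties	= android_battery_props;
	battery->psy_bat.num_properties	= ARRAY_SIZE(android_battery_props);
	battery->psy_bat.get_property	= android_bat_get_property;

	battery->psy_ac.name		= "android-ac";
	battery->psy_ac.type		= POWER_SUPPLY_TYPE_MAINS;
	battery->psy_ac.supplied_to	= supply_list;
	battery->psy_ac.num_supplicants	= ARRAY_SIZE(supply_list);
	battery->psy_ac.properties	= android_power_props;
	battery->psy_ac.num_properties	= ARRAY_SIZE(android_power_props);
	battery->psy_ac.get_property	= android_ac_get_property;

	ret = power_supply_register(battery->dev, &battery->psy_bat);
	if (ret)
		return ret;
	return power_supply_register(battery->dev, &battery->psy_ac);
}
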
+
+static void android_bat_get_temp(struct android_bat_data *battery)
+{
+ int batt_temp = 42; /* 4.2C */
+ int health = battery->batt_health;
+
+ if (battery->pdata->get_temperature)
+ battery->pdata->get_temperature(&batt_temp);
+
+ if (battery->charge_source != CHARGE_SOURCE_NONE) {
+ if (batt_temp >= battery->pdata->temp_high_threshold) {
+ if (health != POWER_SUPPLY_HEALTH_OVERHEAT &&
+ health != POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) {
+ pr_info("battery overheat (%d>=%d), " \
+ "charging unavailable\n",
+ batt_temp,
+ battery->pdata->temp_high_threshold);
+ battery->batt_health =
+ POWER_SUPPLY_HEALTH_OVERHEAT;
+ }
+ } else if (batt_temp <= battery->pdata->temp_high_recovery &&
+ batt_temp >= battery->pdata->temp_low_recovery) {
+ if (health == POWER_SUPPLY_HEALTH_OVERHEAT ||
+ health == POWER_SUPPLY_HEALTH_COLD) {
+ pr_info("battery recovery (%d,%d~%d)," \
+ "charging available\n",
+ batt_temp,
+ battery->pdata->temp_low_recovery,
+ battery->pdata->temp_high_recovery);
+ battery->batt_health =
+ POWER_SUPPLY_HEALTH_GOOD;
+ }
+ } else if (batt_temp <= battery->pdata->temp_low_threshold) {
+ if (health != POWER_SUPPLY_HEALTH_COLD &&
+ health != POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) {
+ pr_info("battery cold (%d <= %d)," \
+ "charging unavailable\n",
+ batt_temp,
+ battery->pdata->temp_low_threshold);
+ battery->batt_health =
+ POWER_SUPPLY_HEALTH_COLD;
+ }
+ }
+ }
+
+ battery->batt_temp = batt_temp;
+}
+
+/*
+ * android_bat_state_lock not held, may call back into
+ * android_bat_charge_source_changed. Gathering data here can be
+ * non-atomic; updating our state based on the data may need to be
+ * atomic.
+ */
+
+static void android_bat_update_data(struct android_bat_data *battery)
+{
+ int ret;
+ int v;
+
+ if (battery->pdata->poll_charge_source)
+ battery->charge_source = battery->pdata->poll_charge_source();
+
+ if (battery->pdata->get_voltage_now) {
+ ret = battery->pdata->get_voltage_now();
+ battery->batt_vcell = ret >= 0 ? ret : 4242000;
+ }
+
+ if (battery->pdata->get_capacity) {
+ ret = battery->pdata->get_capacity();
+ battery->batt_soc = ret >= 0 ? ret : 42;
+ }
+
+ if (battery->pdata->get_current_now) {
+ ret = battery->pdata->get_current_now(&v);
+
+ if (!ret)
+ battery->batt_current = v;
+ }
+
+ android_bat_get_temp(battery);
+}
+
+static void android_bat_set_charge_time(struct android_bat_data *battery,
+ bool enable)
+{
+ if (enable && !battery->charging_start_time) {
+ struct timespec cur_time;
+
+ get_monotonic_boottime(&cur_time);
+ /* record start time for charge timeout timer */
+ battery->charging_start_time = cur_time.tv_sec;
+ } else if (!enable) {
+ /* clear charge timeout timer */
+ battery->charging_start_time = 0;
+ }
+}
+
+static int android_bat_enable_charging(struct android_bat_data *battery,
+ bool enable)
+{
+ if (enable && (battery->batt_health != POWER_SUPPLY_HEALTH_GOOD)) {
+ battery->charging_status =
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return -EPERM;
+ }
+
+ if (enable) {
+ if (battery->pdata && battery->pdata->set_charging_current)
+ battery->pdata->set_charging_current
+ (battery->charge_source);
+ }
+
+ if (battery->pdata && battery->pdata->set_charging_enable)
+ battery->pdata->set_charging_enable(enable);
+
+ android_bat_set_charge_time(battery, enable);
+ pr_info("battery: enable=%d charger: %s\n", enable,
+ charge_source_str(battery->charge_source));
+ return 0;
+}
+
+static bool android_bat_charge_timeout(struct android_bat_data *battery,
+ unsigned long timeout)
+{
+ struct timespec cur_time;
+
+ if (!battery->charging_start_time)
+ return 0;
+
+ get_monotonic_boottime(&cur_time);
+ pr_debug("%s: Start time: %ld, End time: %ld, current time: %ld\n",
+ __func__, battery->charging_start_time,
+ battery->charging_start_time + timeout,
+ cur_time.tv_sec);
+ return cur_time.tv_sec >= battery->charging_start_time + timeout;
+}
+
+static void android_bat_charging_timer(struct android_bat_data *battery)
+{
+ if (!battery->charging_start_time &&
+ battery->charging_status == POWER_SUPPLY_STATUS_CHARGING) {
+ android_bat_enable_charging(battery, true);
+ battery->recharging = true;
+ pr_debug("%s: charge status charging but timer is expired\n",
+ __func__);
+ } else if (battery->charging_start_time == 0) {
+ pr_debug("%s: charging_start_time never initialized\n",
+ __func__);
+ return;
+ }
+
+ if (android_bat_charge_timeout(
+ battery,
+ battery->recharging ? battery->pdata->recharging_time :
+ battery->pdata->full_charging_time)) {
+ android_bat_enable_charging(battery, false);
+ if (battery->batt_vcell >
+ battery->pdata->recharging_voltage &&
+ battery->batt_soc == 100)
+ battery->charging_status =
+ POWER_SUPPLY_STATUS_FULL;
+ battery->recharging = false;
+ battery->charging_start_time = 0;
+ pr_info("battery: charging timer expired\n");
+ }
+
+ return;
+}
+
+static void android_bat_charge_source_changed(struct android_bat_callbacks *ptr,
+ int charge_source)
+{
+ struct android_bat_data *battery =
+ container_of(ptr, struct android_bat_data, callbacks);
+
+ wake_lock(&battery->charger_wake_lock);
+ mutex_lock(&android_bat_state_lock);
+ battery->charge_source = charge_source;
+
+ pr_info("battery: charge source type was changed: %s\n",
+ charge_source_str(battery->charge_source));
+
+ mutex_unlock(&android_bat_state_lock);
+ queue_work(battery->monitor_wqueue, &battery->charger_work);
+}
+
+static void android_bat_set_full_status(struct android_bat_callbacks *ptr)
+{
+ struct android_bat_data *battery =
+ container_of(ptr, struct android_bat_data, callbacks);
+
+ mutex_lock(&android_bat_state_lock);
+ pr_info("battery: battery full\n");
+ battery->charging_status = POWER_SUPPLY_STATUS_FULL;
+ android_bat_enable_charging(battery, false);
+ battery->recharging = false;
+ mutex_unlock(&android_bat_state_lock);
+ power_supply_changed(&battery->psy_bat);
+}
+
+static void android_bat_charger_work(struct work_struct *work)
+{
+ struct android_bat_data *battery =
+ container_of(work, struct android_bat_data, charger_work);
+
+ mutex_lock(&android_bat_state_lock);
+
+ switch (battery->charge_source) {
+ case CHARGE_SOURCE_NONE:
+ battery->charging_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ android_bat_enable_charging(battery, false);
+ battery->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ battery->recharging = false;
+ battery->charging_start_time = 0;
+ break;
+ case CHARGE_SOURCE_USB:
+ case CHARGE_SOURCE_AC:
+ /*
+ * If charging status indicates a charger was already
+ * connected prior to this and the status is something
+ * other than charging ("full" or "not-charging"), leave
+ * the status alone.
+ */
+ if (battery->charging_status ==
+ POWER_SUPPLY_STATUS_DISCHARGING ||
+ battery->charging_status == POWER_SUPPLY_STATUS_UNKNOWN)
+ battery->charging_status = POWER_SUPPLY_STATUS_CHARGING;
+
+ /*
+ * Don't re-enable charging if the battery is full and we
+ * are not actively re-charging it, or if "not-charging"
+ * status is set.
+ */
+ if (!((battery->charging_status == POWER_SUPPLY_STATUS_FULL
+ && !battery->recharging) || battery->charging_status ==
+ POWER_SUPPLY_STATUS_NOT_CHARGING))
+ android_bat_enable_charging(battery, true);
+
+ break;
+ default:
+ pr_err("%s: Invalid charger type\n", __func__);
+ break;
+ }
+
+ mutex_unlock(&android_bat_state_lock);
+ wake_lock_timeout(&battery->charger_wake_lock, HZ * 2);
+ power_supply_changed(&battery->psy_ac);
+ power_supply_changed(&battery->psy_usb);
+}
+
+
+static void android_bat_monitor_set_alarm(struct android_bat_data *battery,
+ int seconds)
+{
+ alarm_start(&battery->monitor_alarm,
+ ktime_add(battery->last_poll, ktime_set(seconds, 0)));
+}
+
+static void android_bat_monitor_work(struct work_struct *work)
+{
+ struct android_bat_data *battery =
+ container_of(work, struct android_bat_data, monitor_work);
+ struct timespec cur_time;
+
+ wake_lock(&battery->monitor_wake_lock);
+ android_bat_update_data(battery);
+ mutex_lock(&android_bat_state_lock);
+
+ switch (battery->charging_status) {
+ case POWER_SUPPLY_STATUS_FULL:
+ if (battery->batt_vcell < battery->pdata->recharging_voltage &&
+ !battery->recharging) {
+ battery->recharging = true;
+ android_bat_enable_charging(battery, true);
+ pr_info("battery: start recharging, v=%d\n",
+ battery->batt_vcell/1000);
+ }
+ break;
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ switch (battery->batt_health) {
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ case POWER_SUPPLY_HEALTH_DEAD:
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ battery->charging_status =
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ android_bat_enable_charging(battery, false);
+
+ pr_info("battery: Not charging, health=%d\n",
+ battery->batt_health);
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (battery->batt_health == POWER_SUPPLY_HEALTH_GOOD) {
+ pr_info("battery: battery health recovered\n");
+ if (battery->charge_source != CHARGE_SOURCE_NONE) {
+ android_bat_enable_charging(battery, true);
+ battery->charging_status
+ = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ battery->charging_status
+ = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ }
+ break;
+ default:
+ pr_err("%s: Undefined battery status: %d\n", __func__,
+ battery->charging_status);
+ break;
+ }
+
+ android_bat_charging_timer(battery);
+ get_monotonic_boottime(&cur_time);
+ pr_info("battery: l=%d v=%d c=%d temp=%s%ld.%ld h=%d st=%d%s ct=%lu type=%s\n",
+ battery->batt_soc, battery->batt_vcell/1000,
+ battery->batt_current, battery->batt_temp < 0 ? "-" : "",
+ abs(battery->batt_temp / 10), abs(battery->batt_temp % 10),
+ battery->batt_health, battery->charging_status,
+ battery->recharging ? "r" : "",
+ battery->charging_start_time ?
+ cur_time.tv_sec - battery->charging_start_time : 0,
+ charge_source_str(battery->charge_source));
+ mutex_unlock(&android_bat_state_lock);
+ power_supply_changed(&battery->psy_bat);
+ battery->last_poll = ktime_get_boottime();
+ android_bat_monitor_set_alarm(battery, FAST_POLL);
+ wake_unlock(&battery->monitor_wake_lock);
+}
+
+static enum alarmtimer_restart android_bat_monitor_alarm(
+ struct alarm *alarm, ktime_t now)
+{
+ struct android_bat_data *battery =
+ container_of(alarm, struct android_bat_data, monitor_alarm);
+
+ wake_lock(&battery->monitor_wake_lock);
+ queue_work(battery->monitor_wqueue, &battery->monitor_work);
+ return ALARMTIMER_NORESTART;
+}
+
+static int android_power_debug_dump(struct seq_file *s, void *unused)
+{
+ struct android_bat_data *battery = s->private;
+ struct timespec cur_time;
+
+ android_bat_update_data(battery);
+ get_monotonic_boottime(&cur_time);
+ mutex_lock(&android_bat_state_lock);
+ seq_printf(s, "l=%d v=%d c=%d temp=%s%ld.%ld h=%d st=%d%s ct=%lu type=%s\n",
+ battery->batt_soc, battery->batt_vcell/1000,
+ battery->batt_current, battery->batt_temp < 0 ? "-" : "",
+ abs(battery->batt_temp / 10), abs(battery->batt_temp % 10),
+ battery->batt_health, battery->charging_status,
+ battery->recharging ? "r" : "",
+ battery->charging_start_time ?
+ cur_time.tv_sec - battery->charging_start_time : 0,
+ charge_source_str(battery->charge_source));
+ mutex_unlock(&android_bat_state_lock);
+ return 0;
+}
+
+static int android_power_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, android_power_debug_dump, inode->i_private);
+}
+
+static const struct file_operations android_power_debug_fops = {
+ .open = android_power_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
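
The one-line status format shared by the periodic monitor log above and this debugfs file decodes as follows (sample values are invented):

/*
 * l=87 v=4123 c=-210 temp=28.5 h=1 st=1r ct=743 type=usb
 *
 * l	batt_soc, capacity in percent
 * v	batt_vcell / 1000, cell voltage in mV
 * c	batt_current
 * temp	batt_temp, in tenths of a degree C
 * h	POWER_SUPPLY_HEALTH_* value
 * st	POWER_SUPPLY_STATUS_* value, suffixed with "r" while recharging
 * ct	seconds since charging_start_time, 0 when no timer is running
 * type	charge_source_str() of the current charge source
 */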
+
+static __devinit int android_bat_probe(struct platform_device *pdev)
+{
+ struct android_bat_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct android_bat_data *battery;
+ int ret = 0;
+
+ dev_info(&pdev->dev, "Android Battery Driver\n");
+ battery = kzalloc(sizeof(*battery), GFP_KERNEL);
+ if (!battery)
+ return -ENOMEM;
+
+ battery->pdata = pdata;
+ if (!battery->pdata) {
+ pr_err("%s : No platform data\n", __func__);
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+
+ battery->dev = &pdev->dev;
+ platform_set_drvdata(pdev, battery);
+ battery->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+
+ battery->psy_bat.name = "android-battery";
+ battery->psy_bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->psy_bat.properties = android_battery_props;
+ battery->psy_bat.num_properties = ARRAY_SIZE(android_battery_props);
+ battery->psy_bat.get_property = android_bat_get_property;
+
+ battery->psy_usb.name = "android-usb";
+ battery->psy_usb.type = POWER_SUPPLY_TYPE_USB;
+ battery->psy_usb.supplied_to = supply_list;
+ battery->psy_usb.num_supplicants = ARRAY_SIZE(supply_list);
+ battery->psy_usb.properties = android_power_props;
+ battery->psy_usb.num_properties = ARRAY_SIZE(android_power_props);
+ battery->psy_usb.get_property = android_usb_get_property;
+
+ battery->psy_ac.name = "android-ac";
+ battery->psy_ac.type = POWER_SUPPLY_TYPE_MAINS;
+ battery->psy_ac.supplied_to = supply_list;
+ battery->psy_ac.num_supplicants = ARRAY_SIZE(supply_list);
+ battery->psy_ac.properties = android_power_props;
+ battery->psy_ac.num_properties = ARRAY_SIZE(android_power_props);
+ battery->psy_ac.get_property = android_ac_get_property;
+
+ battery->batt_vcell = -1;
+ battery->batt_soc = -1;
+
+ wake_lock_init(&battery->monitor_wake_lock, WAKE_LOCK_SUSPEND,
+ "android-battery-monitor");
+ wake_lock_init(&battery->charger_wake_lock, WAKE_LOCK_SUSPEND,
+ "android-chargerdetect");
+
+ ret = power_supply_register(&pdev->dev, &battery->psy_bat);
+ if (ret) {
+ dev_err(battery->dev, "%s: failed to register psy_bat\n",
+ __func__);
+ goto err_psy_bat_reg;
+ }
+
+ ret = power_supply_register(&pdev->dev, &battery->psy_usb);
+ if (ret) {
+ dev_err(battery->dev, "%s: failed to register psy_usb\n",
+ __func__);
+ goto err_psy_usb_reg;
+ }
+
+ ret = power_supply_register(&pdev->dev, &battery->psy_ac);
+ if (ret) {
+ dev_err(battery->dev, "%s: failed to register psy_ac\n",
+ __func__);
+ goto err_psy_ac_reg;
+ }
+
+ battery->monitor_wqueue =
+ alloc_workqueue(dev_name(&pdev->dev), WQ_FREEZABLE, 1);
+ if (!battery->monitor_wqueue) {
+ dev_err(battery->dev, "%s: failed to create workqueue\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_wq;
+ }
+
+ INIT_WORK(&battery->monitor_work, android_bat_monitor_work);
+ INIT_WORK(&battery->charger_work, android_bat_charger_work);
+
+ battery->callbacks.charge_source_changed =
+ android_bat_charge_source_changed;
+ battery->callbacks.battery_set_full =
+ android_bat_set_full_status;
+ if (battery->pdata && battery->pdata->register_callbacks)
+ battery->pdata->register_callbacks(&battery->callbacks);
+
+ /* get initial charger status */
+ if (battery->pdata->poll_charge_source)
+ battery->charge_source = battery->pdata->poll_charge_source();
+
+ wake_lock(&battery->charger_wake_lock);
+ queue_work(battery->monitor_wqueue, &battery->charger_work);
+
+ wake_lock(&battery->monitor_wake_lock);
+ battery->last_poll = ktime_get_boottime();
+ alarm_init(&battery->monitor_alarm, ALARM_BOOTTIME,
+ android_bat_monitor_alarm);
+ queue_work(battery->monitor_wqueue, &battery->monitor_work);
+
+ battery->debugfs_entry =
+ debugfs_create_file("android-power", S_IRUGO, NULL,
+ battery, &android_power_debug_fops);
+ if (!battery->debugfs_entry)
+ pr_err("failed to create android-power debugfs entry\n");
+
+ return 0;
+
+err_wq:
+ power_supply_unregister(&battery->psy_ac);
+err_psy_ac_reg:
+ power_supply_unregister(&battery->psy_usb);
+err_psy_usb_reg:
+ power_supply_unregister(&battery->psy_bat);
+err_psy_bat_reg:
+ wake_lock_destroy(&battery->monitor_wake_lock);
+ wake_lock_destroy(&battery->charger_wake_lock);
+err_pdata:
+ kfree(battery);
+
+ return ret;
+}
+
+static int __devexit android_bat_remove(struct platform_device *pdev)
+{
+ struct android_bat_data *battery = platform_get_drvdata(pdev);
+
+ alarm_cancel(&battery->monitor_alarm);
+ flush_workqueue(battery->monitor_wqueue);
+ destroy_workqueue(battery->monitor_wqueue);
+ power_supply_unregister(&battery->psy_ac);
+ power_supply_unregister(&battery->psy_usb);
+ power_supply_unregister(&battery->psy_bat);
+ wake_lock_destroy(&battery->monitor_wake_lock);
+ wake_lock_destroy(&battery->charger_wake_lock);
+ debugfs_remove(battery->debugfs_entry);
+ kfree(battery);
+ return 0;
+}
+
+static int android_bat_suspend(struct device *dev)
+{
+ struct android_bat_data *battery = dev_get_drvdata(dev);
+
+ cancel_work_sync(&battery->monitor_work);
+ android_bat_monitor_set_alarm(
+ battery,
+ battery->charge_source == CHARGE_SOURCE_NONE ?
+ SLOW_POLL : FAST_POLL);
+ return 0;
+}
+
+static void android_bat_resume(struct device *dev)
+{
+ struct android_bat_data *battery = dev_get_drvdata(dev);
+
+ android_bat_monitor_set_alarm(battery, FAST_POLL);
+}
+
+static const struct dev_pm_ops android_bat_pm_ops = {
+ .prepare = android_bat_suspend,
+ .complete = android_bat_resume,
+};
+
+static struct platform_driver android_bat_driver = {
+ .driver = {
+ .name = "android-battery",
+ .owner = THIS_MODULE,
+ .pm = &android_bat_pm_ops,
+ },
+ .probe = android_bat_probe,
+ .remove = __devexit_p(android_bat_remove),
+};
+
+static int __init android_bat_init(void)
+{
+ return platform_driver_register(&android_bat_driver);
+}
+
+static void __exit android_bat_exit(void)
+{
+ platform_driver_unregister(&android_bat_driver);
+}
+
+late_initcall(android_bat_init);
+module_exit(android_bat_exit);
+
+MODULE_DESCRIPTION("Android battery driver");
+MODULE_LICENSE("GPL");
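
For orientation, a minimal board-file sketch of feeding this driver. This is an assumption, not part of this patch: the hook signatures follow how the driver calls them above, and the numbers are placeholders.

#include <linux/platform_device.h>
#include <linux/platform_data/android_battery.h>

static struct android_bat_callbacks *bat_cb;

/* saved so a charger IRQ handler can call bat_cb->charge_source_changed() */
static void board_register_callbacks(struct android_bat_callbacks *cb)
{
	bat_cb = cb;
}

static int board_poll_charge_source(void)
{
	return CHARGE_SOURCE_NONE;	/* ask the PMIC which cable is present */
}

static void board_set_charging_enable(int enable)
{
	/* toggle the charger IC here */
}

static struct android_bat_platform_data board_bat_pdata = {
	.register_callbacks	= board_register_callbacks,
	.poll_charge_source	= board_poll_charge_source,
	.set_charging_enable	= board_set_charging_enable,
	.full_charging_time	= 6 * 60 * 60,	/* seconds of boottime */
	.recharging_time	= 90 * 60,
	.recharging_voltage	= 4300000,	/* uV, compared to batt_vcell */
};

static struct platform_device board_bat_device = {
	.name	= "android-battery",
	.id	= -1,
	.dev	= {
		.platform_data = &board_bat_pdata,
	},
};
/* registered from the board's init code via platform_device_register() */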
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index bf9c2c25c26a..9af72fd180d4 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -695,6 +695,11 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EBADF;
break;
}
+ if (!(in_egroup_p(file->f_dentry->d_inode->i_gid) ||
+ capable(CAP_SYSLOG))) {
+ ret = -EPERM;
+ break;
+ }
list_for_each_entry(reader, &log->readers, list)
reader->r_off = log->w_off;
log->head = log->w_off;
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index d2c3393237b6..9baded09188c 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -42,6 +42,8 @@
#include "epautoconf.c"
#include "composite.c"
+#include "f_fs.c"
+#include "f_audio_source.c"
#include "f_mass_storage.c"
#include "u_serial.c"
#include "f_acm.c"
@@ -109,6 +111,7 @@ struct android_dev {
bool connected;
bool sw_connected;
struct work_struct work;
+ char ffs_aliases[256];
};
static struct class *android_class;
@@ -218,6 +221,158 @@ static void android_disable(struct android_dev *dev)
/*-------------------------------------------------------------------------*/
/* Supported functions initialization */
+struct functionfs_config {
+ bool opened;
+ bool enabled;
+ struct ffs_data *data;
+};
+
+static int ffs_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+
+ return functionfs_init();
+}
+
+static void ffs_function_cleanup(struct android_usb_function *f)
+{
+ functionfs_cleanup();
+ kfree(f->config);
+}
+
+static void ffs_function_enable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = f->config;
+
+ config->enabled = true;
+
+ /* Disable the gadget until the function is ready */
+ if (!config->opened)
+ android_disable(dev);
+}
+
+static void ffs_function_disable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = f->config;
+
+ config->enabled = false;
+
+ /* Balance the disable that was called in closed_callback */
+ if (!config->opened)
+ android_enable(dev);
+}
+
+static int ffs_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct functionfs_config *config = f->config;
+ return functionfs_bind_config(c->cdev, c, config->data);
+}
+
+static ssize_t
+ffs_aliases_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+ struct android_dev *dev = _android_dev;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+ ret = sprintf(buf, "%s\n", dev->ffs_aliases);
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static ssize_t
+ffs_aliases_store(struct device *pdev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct android_dev *dev = _android_dev;
+ char buff[256];
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->enabled) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+
+ strlcpy(buff, buf, sizeof(buff));
+ strlcpy(dev->ffs_aliases, strim(buff), sizeof(dev->ffs_aliases));
+
+ mutex_unlock(&dev->mutex);
+
+ return size;
+}
+
+static DEVICE_ATTR(aliases, S_IRUGO | S_IWUSR, ffs_aliases_show,
+ ffs_aliases_store);
+static struct device_attribute *ffs_function_attributes[] = {
+ &dev_attr_aliases,
+ NULL
+};
+
+static struct android_usb_function ffs_function = {
+ .name = "ffs",
+ .init = ffs_function_init,
+ .enable = ffs_function_enable,
+ .disable = ffs_function_disable,
+ .cleanup = ffs_function_cleanup,
+ .bind_config = ffs_function_bind_config,
+ .attributes = ffs_function_attributes,
+};
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = ffs_function.config;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ ret = functionfs_bind(ffs, dev->cdev);
+ if (ret)
+ goto err;
+
+ config->data = ffs;
+ config->opened = true;
+
+ if (config->enabled)
+ android_enable(dev);
+
+err:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = ffs_function.config;
+
+ mutex_lock(&dev->mutex);
+
+ if (config->enabled)
+ android_disable(dev);
+
+ config->opened = false;
+ config->data = NULL;
+
+ functionfs_unbind(ffs);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int functionfs_check_dev_callback(const char *dev_name)
+{
+ return 0;
+}
+
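
The opened/enabled pairing above mirrors the FunctionFS handshake driven from userspace; roughly (paths and ordering are the conventional Android usage, not defined by this patch):

/*
 * mount("adb", "/dev/usb-ffs/adb", "functionfs", 0, NULL);
 * fd = open("/dev/usb-ffs/adb/ep0", O_RDWR);
 * write(fd, descriptors, desc_len);
 * write(fd, strings, str_len);    -> functionfs_ready_callback(),
 *                                    config->opened = true
 * close(fd);                      -> functionfs_closed_callback()
 */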
+
struct adb_data {
bool opened;
bool enabled;
@@ -763,8 +918,70 @@ static struct android_usb_function accessory_function = {
.ctrlrequest = accessory_function_ctrlrequest,
};
+static int audio_source_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ struct audio_source_config *config;
+
+ config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+ config->card = -1;
+ config->device = -1;
+ f->config = config;
+ return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct audio_source_config *config = f->config;
+
+ return audio_source_bind_config(c, config);
+}
+
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct audio_source_config *config = f->config;
+
+ config->card = -1;
+ config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct audio_source_config *config = f->config;
+
+ /* print PCM card and device numbers */
+ return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+ &dev_attr_pcm,
+ NULL
+};
+
+static struct android_usb_function audio_source_function = {
+ .name = "audio_source",
+ .init = audio_source_function_init,
+ .cleanup = audio_source_function_cleanup,
+ .bind_config = audio_source_function_bind_config,
+ .unbind_config = audio_source_function_unbind_config,
+ .attributes = audio_source_function_attributes,
+};
static struct android_usb_function *supported_functions[] = {
+ &ffs_function,
&adb_function,
&acm_function,
&mtp_function,
@@ -772,6 +989,7 @@ static struct android_usb_function *supported_functions[] = {
&rndis_function,
&mass_storage_function,
&accessory_function,
+ &audio_source_function,
NULL
};
@@ -915,7 +1133,10 @@ functions_store(struct device *pdev, struct device_attribute *attr,
struct android_dev *dev = dev_get_drvdata(pdev);
char *name;
char buf[256], *b;
+ char aliases[256], *a;
int err;
+ int is_ffs;
+ int ffs_enabled = 0;
mutex_lock(&dev->mutex);
@@ -926,16 +1147,42 @@ functions_store(struct device *pdev, struct device_attribute *attr,
INIT_LIST_HEAD(&dev->enabled_functions);
- strncpy(buf, buff, sizeof(buf));
+ strlcpy(buf, buff, sizeof(buf));
b = strim(buf);
while (b) {
name = strsep(&b, ",");
- if (name) {
- err = android_enable_function(dev, name);
+ if (!name)
+ continue;
+
+ is_ffs = 0;
+ strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
+ a = aliases;
+
+ while (a) {
+ char *alias = strsep(&a, ",");
+ if (alias && !strcmp(name, alias)) {
+ is_ffs = 1;
+ break;
+ }
+ }
+
+ if (is_ffs) {
+ if (ffs_enabled)
+ continue;
+ err = android_enable_function(dev, "ffs");
if (err)
- pr_err("android_usb: Cannot enable '%s'", name);
+ pr_err("android_usb: Cannot enable ffs (%d)",
+ err);
+ else
+ ffs_enabled = 1;
+ continue;
}
+
+ err = android_enable_function(dev, name);
+ if (err)
+ pr_err("android_usb: Cannot enable '%s' (%d)",
+ name, err);
}
mutex_unlock(&dev->mutex);
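
Net effect of the alias pass above, as a usage sketch (the sysfs paths assume the android gadget's usual per-function attribute layout):

/*
 * echo "adb" > /sys/class/android_usb/android0/f_ffs/aliases
 * echo "mtp,adb" > /sys/class/android_usb/android0/functions
 *
 * enables "mtp" plus "ffs" exactly once; "adb" is never enabled as a
 * legacy function, and adbd supplies its descriptors over FunctionFS.
 */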
@@ -966,7 +1213,6 @@ static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
sscanf(buff, "%d", &enabled);
if (enabled && !dev->enabled) {
- cdev->next_string_id = 0;
/*
* Update values in composite driver's copy of
* device descriptor.
@@ -1053,10 +1299,7 @@ field ## _store(struct device *dev, struct device_attribute *attr, \
{ \
if (size >= sizeof(buffer)) \
return -EINVAL; \
- if (sscanf(buf, "%s", buffer) == 1) { \
- return size; \
- } \
- return -1; \
+ return strlcpy(buffer, buf, sizeof(buffer)); \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
@@ -1118,7 +1361,7 @@ static int android_bind(struct usb_composite_dev *cdev)
{
struct android_dev *dev = _android_dev;
struct usb_gadget *gadget = cdev->gadget;
- int gcnum, id, ret;
+ int id, ret;
/*
* Start disconnected. Userspace will connect the gadget once
@@ -1156,15 +1399,6 @@ static int android_bind(struct usb_composite_dev *cdev)
strings_dev[STRING_SERIAL_IDX].id = id;
device_desc.iSerialNumber = id;
- gcnum = usb_gadget_controller_number(gadget);
- if (gcnum >= 0)
- device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
- else {
- pr_warning("%s: controller '%s' not recognized\n",
- longname, gadget->name);
- device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
- }
-
usb_gadget_set_selfpowered(gadget);
dev->cdev = cdev;
@@ -1184,6 +1418,7 @@ static struct usb_composite_driver android_usb_driver = {
.name = "android_usb",
.dev = &device_desc,
.strings = dev_strings,
+ .bind = android_bind,
.unbind = android_usb_unbind,
.max_speed = USB_SPEED_HIGH,
};
@@ -1240,6 +1475,11 @@ static void android_disconnect(struct usb_gadget *gadget)
unsigned long flags;
composite_disconnect(gadget);
+ /*
+ * Accessory HID support can be active while the accessory function
+ * is not actually enabled, so we need to inform it when we are
+ * disconnected.
+ */
+ acc_disconnect();
spin_lock_irqsave(&cdev->lock, flags);
dev->connected = 0;
@@ -1300,10 +1540,10 @@ static int __init init(void)
_android_dev = dev;
/* Override composite driver functions */
- composite_driver.setup = android_setup;
- composite_driver.disconnect = android_disconnect;
+ composite_driver_template.setup = android_setup;
+ composite_driver_template.disconnect = android_disconnect;
- return usb_composite_probe(&android_usb_driver, android_bind);
+ return usb_composite_probe(&android_usb_driver);
}
module_init(init);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 957f973dd96a..c4460a54f917 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1528,8 +1528,11 @@ composite_resume(struct usb_gadget *gadget)
}
/*-------------------------------------------------------------------------*/
-
+#if IS_ENABLED(CONFIG_USB_G_ANDROID)
+static struct usb_gadget_driver composite_driver_template = {
+#else
static const struct usb_gadget_driver composite_driver_template = {
+#endif
.bind = composite_bind,
.unbind = composite_unbind,
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
index a5818227611a..a244265c1143 100644
--- a/drivers/usb/gadget/f_accessory.c
+++ b/drivers/usb/gadget/f_accessory.c
@@ -33,6 +33,8 @@
#include <linux/device.h>
#include <linux/miscdevice.h>
+#include <linux/hid.h>
+#include <linux/hiddev.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/f_accessory.h>
@@ -40,7 +42,7 @@
#define BULK_BUFFER_SIZE 16384
#define ACC_STRING_SIZE 256
-#define PROTOCOL_VERSION 1
+#define PROTOCOL_VERSION 2
/* String IDs */
#define INTERFACE_STRING_INDEX 0
@@ -49,6 +51,20 @@
#define TX_REQ_MAX 4
#define RX_REQ_MAX 2
+struct acc_hid_dev {
+ struct list_head list;
+ struct hid_device *hid;
+ struct acc_dev *dev;
+ /* accessory defined ID */
+ int id;
+ /* HID report descriptor */
+ u8 *report_desc;
+ /* length of HID report descriptor */
+ int report_desc_len;
+ /* number of bytes of report_desc we have received so far */
+ int report_desc_offset;
+};
+
struct acc_dev {
struct usb_function function;
struct usb_composite_dev *cdev;
@@ -78,6 +94,8 @@ struct acc_dev {
/* set to 1 if we have a pending start request */
int start_requested;
+ int audio_mode;
+
/* synchronize access to our device file */
atomic_t open_excl;
@@ -87,7 +105,21 @@ struct acc_dev {
wait_queue_head_t write_wq;
struct usb_request *rx_req[RX_REQ_MAX];
int rx_done;
- struct delayed_work work;
+
+ /* delayed work for handling ACCESSORY_START */
+ struct delayed_work start_work;
+
+ /* worker for registering and unregistering hid devices */
+ struct work_struct hid_work;
+
+ /* list of active HID devices */
+ struct list_head hid_list;
+
+ /* list of new HID devices to register */
+ struct list_head new_hid_list;
+
+ /* list of dead HID devices to unregister */
+ struct list_head dead_hid_list;
};
static struct usb_interface_descriptor acc_interface_desc = {
@@ -296,7 +328,161 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
}
}
-static int __init create_bulk_endpoints(struct acc_dev *dev,
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct acc_hid_dev *hid = req->context;
+ struct acc_dev *dev = hid->dev;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_set_hid_report_desc, err %d\n",
+ req->status);
+ return;
+ }
+
+ memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+ hid->report_desc_offset += length;
+ if (hid->report_desc_offset == hid->report_desc_len) {
+ /* After we have received the entire report descriptor
+ * we schedule work to initialize the HID device
+ */
+ schedule_work(&dev->hid_work);
+ }
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct acc_hid_dev *hid = req->context;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+ return;
+ }
+
+ hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+ struct acc_hid_dev *hdev = hid->driver_data;
+
+ return hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+ .parse = acc_hid_parse,
+ .start = acc_hid_start,
+ .stop = acc_hid_stop,
+ .open = acc_hid_open,
+ .close = acc_hid_close,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+ int id, int desc_len)
+{
+ struct acc_hid_dev *hdev;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+ if (!hdev)
+ return NULL;
+ hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+ if (!hdev->report_desc) {
+ kfree(hdev);
+ return NULL;
+ }
+ hdev->dev = dev;
+ hdev->id = id;
+ hdev->report_desc_len = desc_len;
+
+ return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+ struct acc_hid_dev *hid;
+
+ list_for_each_entry(hid, list, list) {
+ if (hid->id == id)
+ return hid;
+ }
+ return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+ struct acc_hid_dev *hid;
+ unsigned long flags;
+
+ /* report descriptor length must be > 0 */
+ if (desc_length <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ /* replace HID if one already exists with this ID */
+ hid = acc_hid_get(&dev->hid_list, id);
+ if (!hid)
+ hid = acc_hid_get(&dev->new_hid_list, id);
+ if (hid)
+ list_move(&hid->list, &dev->dead_hid_list);
+
+ hid = acc_hid_new(dev, id, desc_length);
+ if (!hid) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ENOMEM;
+ }
+
+ list_add(&hid->list, &dev->new_hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* schedule work to register the HID device */
+ schedule_work(&dev->hid_work);
+ return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+ struct acc_hid_dev *hid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->hid_list, id);
+ if (!hid)
+ hid = acc_hid_get(&dev->new_hid_list, id);
+ if (!hid) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -EINVAL;
+ }
+
+ list_move(&hid->list, &dev->dead_hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ schedule_work(&dev->hid_work);
+ return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
struct usb_endpoint_descriptor *in_desc,
struct usb_endpoint_descriptor *out_desc)
{
@@ -353,7 +539,7 @@ static int __init create_bulk_endpoints(struct acc_dev *dev,
return 0;
fail:
- printk(KERN_ERR "acc_bind() could not allocate requests\n");
+ pr_err("acc_bind() could not allocate requests\n");
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
@@ -510,6 +696,8 @@ static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
break;
case ACCESSORY_IS_START_REQUESTED:
return dev->start_requested;
+ case ACCESSORY_GET_AUDIO_MODE:
+ return dev->audio_mode;
}
if (!src)
return -EINVAL;
@@ -540,7 +728,7 @@ static int acc_release(struct inode *ip, struct file *fp)
return 0;
}
-/* file operations for /dev/acc_usb */
+/* file operations for /dev/usb_accessory */
static const struct file_operations acc_fops = {
.owner = THIS_MODULE,
.read = acc_read,
@@ -550,23 +738,47 @@ static const struct file_operations acc_fops = {
.release = acc_release,
};
+static int acc_hid_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
static struct miscdevice acc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "usb_accessory",
.fops = &acc_fops,
};
+static const struct hid_device_id acc_hid_table[] = {
+ { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+ { }
+};
+
+static struct hid_driver acc_hid_driver = {
+ .name = "USB accessory",
+ .id_table = acc_hid_table,
+ .probe = acc_hid_probe,
+};
static int acc_ctrlrequest(struct usb_composite_dev *cdev,
const struct usb_ctrlrequest *ctrl)
{
struct acc_dev *dev = _acc_dev;
int value = -EOPNOTSUPP;
+ struct acc_hid_dev *hid;
+ int offset;
u8 b_requestType = ctrl->bRequestType;
u8 b_request = ctrl->bRequest;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
+ unsigned long flags;
/*
printk(KERN_INFO "acc_ctrlrequest "
@@ -579,13 +791,49 @@ static int acc_ctrlrequest(struct usb_composite_dev *cdev,
if (b_request == ACCESSORY_START) {
dev->start_requested = 1;
schedule_delayed_work(
- &dev->work, msecs_to_jiffies(10));
+ &dev->start_work, msecs_to_jiffies(10));
value = 0;
} else if (b_request == ACCESSORY_SEND_STRING) {
dev->string_index = w_index;
cdev->gadget->ep0->driver_data = dev;
cdev->req->complete = acc_complete_set_string;
value = w_length;
+ } else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+ w_index == 0 && w_length == 0) {
+ dev->audio_mode = w_value;
+ value = 0;
+ } else if (b_request == ACCESSORY_REGISTER_HID) {
+ value = acc_register_hid(dev, w_value, w_index);
+ } else if (b_request == ACCESSORY_UNREGISTER_HID) {
+ value = acc_unregister_hid(dev, w_value);
+ } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->new_hid_list, w_value);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!hid) {
+ value = -EINVAL;
+ goto err;
+ }
+ offset = w_index;
+ if (offset != hid->report_desc_offset
+ || offset + w_length > hid->report_desc_len) {
+ value = -EINVAL;
+ goto err;
+ }
+ cdev->req->context = hid;
+ cdev->req->complete = acc_complete_set_hid_report_desc;
+ value = w_length;
+ } else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->hid_list, w_value);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!hid) {
+ value = -EINVAL;
+ goto err;
+ }
+ cdev->req->context = hid;
+ cdev->req->complete = acc_complete_send_hid_event;
+ value = w_length;
}
} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
if (b_request == ACCESSORY_GET_PROTOCOL) {
@@ -600,6 +848,7 @@ static int acc_ctrlrequest(struct usb_composite_dev *cdev,
memset(dev->uri, 0, sizeof(dev->uri));
memset(dev->serial, 0, sizeof(dev->serial));
dev->start_requested = 0;
+ dev->audio_mode = 0;
}
}
@@ -612,6 +861,7 @@ static int acc_ctrlrequest(struct usb_composite_dev *cdev,
__func__);
}
+err:
if (value == -EOPNOTSUPP)
VDBG(cdev,
"unknown class-specific control req "
@@ -631,6 +881,10 @@ acc_function_bind(struct usb_configuration *c, struct usb_function *f)
DBG(cdev, "acc_function_bind dev: %p\n", dev);
+ ret = hid_register_driver(&acc_hid_driver);
+ if (ret)
+ return ret;
+
dev->start_requested = 0;
/* allocate interface ID(s) */
@@ -660,6 +914,36 @@ acc_function_bind(struct usb_configuration *c, struct usb_function *f)
}
static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+ struct acc_hid_dev *hid;
+ struct list_head *entry, *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_for_each_safe(entry, temp, &dev->hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_del(&hid->list);
+ list_add(&hid->list, &dev->dead_hid_list);
+ }
+ list_for_each_safe(entry, temp, &dev->new_hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_del(&hid->list);
+ list_add(&hid->list, &dev->dead_hid_list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+ hid_unregister_driver(&acc_hid_driver);
+ kill_all_hid_devices(dev);
+}
+
+static void
acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct acc_dev *dev = func_to_dev(f);
@@ -670,14 +954,104 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
acc_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
acc_request_free(dev->rx_req[i], dev->ep_out);
+
+ acc_hid_unbind(dev);
}
-static void acc_work(struct work_struct *data)
+static void acc_start_work(struct work_struct *data)
{
char *envp[2] = { "ACCESSORY=START", NULL };
kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
}
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &acc_hid_ll_driver;
+ hid->dev.parent = acc_device.this_device;
+
+ hid->bus = BUS_USB;
+ hid->vendor = HID_ANY_ID;
+ hid->product = HID_ANY_ID;
+ hid->driver_data = hdev;
+ ret = hid_add_device(hid);
+ if (ret) {
+ pr_err("can't add hid device: %d\n", ret);
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ hdev->hid = hid;
+ return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+ kfree(hid->report_desc);
+ kfree(hid);
+}
+
+static void acc_hid_work(struct work_struct *data)
+{
+ struct acc_dev *dev = _acc_dev;
+ struct list_head *entry, *temp;
+ struct acc_hid_dev *hid;
+ struct list_head new_list, dead_list;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&new_list);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* copy hids that are ready for initialization to new_list */
+ list_for_each_safe(entry, temp, &dev->new_hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ if (hid->report_desc_offset == hid->report_desc_len)
+ list_move(&hid->list, &new_list);
+ }
+
+ if (list_empty(&dev->dead_hid_list)) {
+ INIT_LIST_HEAD(&dead_list);
+ } else {
+ /* move all of dev->dead_hid_list to dead_list */
+ dead_list.prev = dev->dead_hid_list.prev;
+ dead_list.next = dev->dead_hid_list.next;
+ dead_list.next->prev = &dead_list;
+ dead_list.prev->next = &dead_list;
+ INIT_LIST_HEAD(&dev->dead_hid_list);
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* register new HID devices */
+ list_for_each_safe(entry, temp, &new_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ if (acc_hid_init(hid)) {
+ pr_err("can't add HID device %p\n", hid);
+ acc_hid_delete(hid);
+ } else {
+ spin_lock_irqsave(&dev->lock, flags);
+ list_move(&hid->list, &dev->hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ }
+
+ /* remove dead HID devices */
+ list_for_each_safe(entry, temp, &dead_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_del(&hid->list);
+ if (hid->hid)
+ hid_destroy_device(hid->hid);
+ acc_hid_delete(hid);
+ }
+}
+
static int acc_function_set_alt(struct usb_function *f,
unsigned intf, unsigned alt)
{
@@ -771,7 +1145,11 @@ static int acc_setup(void)
init_waitqueue_head(&dev->write_wq);
atomic_set(&dev->open_excl, 0);
INIT_LIST_HEAD(&dev->tx_idle);
- INIT_DELAYED_WORK(&dev->work, acc_work);
+ INIT_LIST_HEAD(&dev->hid_list);
+ INIT_LIST_HEAD(&dev->new_hid_list);
+ INIT_LIST_HEAD(&dev->dead_hid_list);
+ INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+ INIT_WORK(&dev->hid_work, acc_hid_work);
/* _acc_dev must be set before calling usb_gadget_register_driver */
_acc_dev = dev;
@@ -784,10 +1162,16 @@ static int acc_setup(void)
err:
kfree(dev);
- printk(KERN_ERR "USB accessory gadget driver failed to initialize\n");
+ pr_err("USB accessory gadget driver failed to initialize\n");
return ret;
}
+static void acc_disconnect(void)
+{
+ /* unregister all HID devices if USB is disconnected */
+ kill_all_hid_devices(_acc_dev);
+}
+
static void acc_cleanup(void)
{
misc_deregister(&acc_device);
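
For reference, the host side of the HID registration handled above could look like this: a sketch using libusb, where the request codes are the AOA v2 values declared in include/linux/usb/f_accessory.h and everything else is illustrative.

#include <libusb-1.0/libusb.h>
#include <stdint.h>

#define ACCESSORY_REGISTER_HID		54	/* per AOA v2 */
#define ACCESSORY_SET_HID_REPORT_DESC	56

/* wValue carries the accessory-chosen HID id throughout */
static int aoa_register_hid(libusb_device_handle *h, uint16_t id,
			    unsigned char *rdesc, uint16_t rdesc_len)
{
	int r;

	/* wIndex = total report descriptor length (see acc_register_hid) */
	r = libusb_control_transfer(h,
			LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
			ACCESSORY_REGISTER_HID, id, rdesc_len, NULL, 0, 1000);
	if (r < 0)
		return r;

	/* wIndex = offset into the descriptor; one chunk is enough here */
	return libusb_control_transfer(h,
			LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
			ACCESSORY_SET_HID_REPORT_DESC, id, 0,
			rdesc, rdesc_len, 1000);
}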
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
new file mode 100644
index 000000000000..c757409edf94
--- /dev/null
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -0,0 +1,828 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 384
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE 0
+#define AUDIO_AS_INTERFACE 1
+#define AUDIO_NUM_INTERFACES 2
+
+/* B.3.1 Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+ + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+ + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2 Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+ .bLength = UAC_DT_AC_HEADER_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_HEADER,
+ .bcdADC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bInCollection = AUDIO_NUM_INTERFACES,
+ .baInterfaceNr = {
+ [0] = AUDIO_AC_INTERFACE,
+ [1] = AUDIO_AS_INTERFACE,
+ }
+};
+
+#define INPUT_TERMINAL_ID 1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = INPUT_TERMINAL_ID,
+ .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE,
+ .bAssocTerminal = 0,
+ .wChannelConfig = 0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID 2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+ .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FEATURE_UNIT,
+ .bUnitID = FEATURE_UNIT_ID,
+ .bSourceID = INPUT_TERMINAL_ID,
+ .bControlSize = 2,
+};
+
+#define OUTPUT_TERMINAL_ID 3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = OUTPUT_TERMINAL_ID,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = FEATURE_UNIT_ID,
+ .bSourceID = FEATURE_UNIT_ID,
+};
+
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
+ .bTerminalLink = INPUT_TERMINAL_ID,
+ .bDelay = 1,
+ .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_SYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+ .bInterval = 4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_SYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+ .bInterval = 1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 1,
+ .wLockDelay = __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&hs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&fs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = SAMPLE_RATE,
+ .rate_max = SAMPLE_RATE,
+
+ .buffer_bytes_max = 1024 * 1024,
+ .period_bytes_min = 64,
+ .period_bytes_max = 512 * 1024,
+ .periods_min = 2,
+ .periods_max = 1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+ int card;
+ int device;
+};
+
+struct audio_dev {
+ struct usb_function func;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ struct snd_pcm_substream *substream;
+
+ struct list_head idle_reqs;
+ struct usb_ep *in_ep;
+
+ spinlock_t lock;
+
+ /* beginning, end and current position in our buffer */
+ void *buffer_start;
+ void *buffer_end;
+ void *buffer_pos;
+
+ /* byte size of a "period" */
+ unsigned int period;
+ /* bytes sent since last call to snd_pcm_period_elapsed */
+ unsigned int period_offset;
+ /* time we started playing */
+ ktime_t start_time;
+ /* number of frames sent since start_time */
+ s64 frames_sent;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+ return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+ req->length = buffer_size;
+ return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ list_add_tail(&req->list, &audio->idle_reqs);
+ spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ if (list_empty(&audio->idle_reqs)) {
+ req = NULL;
+ } else {
+ req = list_first_entry(&audio->idle_reqs, struct usb_request,
+ list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+ struct snd_pcm_runtime *runtime;
+ struct usb_request *req;
+ int length, length1, length2, ret;
+ s64 msecs;
+ s64 frames;
+ ktime_t now;
+
+ /* audio->substream will be null if we have been closed */
+ if (!audio->substream)
+ return;
+ /* audio->buffer_pos will be null if we have been stopped */
+ if (!audio->buffer_pos)
+ return;
+
+ runtime = audio->substream->runtime;
+
+ /* compute number of frames to send */
+ now = ktime_get();
+ msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+ do_div(msecs, 1000000);
+ frames = msecs * SAMPLE_RATE;
+ do_div(frames, 1000);
+
+ /* Readjust our frames_sent if we fall too far behind.
+ * If we get too far behind it is better to drop some frames than
+ * to keep sending data too fast in an attempt to catch up.
+ */
+ if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+ audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+ frames -= audio->frames_sent;
+
+ /* We need to send something to keep the pipeline going */
+ if (frames <= 0)
+ frames = FRAMES_PER_MSEC;
+
+ while (frames > 0) {
+ req = audio_req_get(audio);
+ if (!req)
+ break;
+
+ length = frames_to_bytes(runtime, frames);
+ if (length > IN_EP_MAX_PACKET_SIZE)
+ length = IN_EP_MAX_PACKET_SIZE;
+
+ if (audio->buffer_pos + length > audio->buffer_end)
+ length1 = audio->buffer_end - audio->buffer_pos;
+ else
+ length1 = length;
+ memcpy(req->buf, audio->buffer_pos, length1);
+ if (length1 < length) {
+ /* Wrap around and copy remaining length
+ * at beginning of buffer.
+ */
+ length2 = length - length1;
+ memcpy(req->buf + length1, audio->buffer_start,
+ length2);
+ audio->buffer_pos = audio->buffer_start + length2;
+ } else {
+ audio->buffer_pos += length1;
+ if (audio->buffer_pos >= audio->buffer_end)
+ audio->buffer_pos = audio->buffer_start;
+ }
+
+ req->length = length;
+ ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("usb_ep_queue failed ret: %d\n", ret);
+ audio_req_put(audio, req);
+ break;
+ }
+
+ frames -= bytes_to_frames(runtime, length);
+ audio->frames_sent += bytes_to_frames(runtime, length);
+ }
+}
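
Worked example of the pacing above:

/*
 * At SAMPLE_RATE 44100, FRAMES_PER_MSEC is 44 (integer division). Ten
 * ms into playback the target is ~441 frames; with 2-channel S16 audio
 * a 384-byte request holds 96 frames, and only IN_EP_REQ_COUNT (4)
 * requests exist, so the loop queues at most four and catches up as
 * completions recycle them through audio_req_put().
 */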
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct audio_dev *audio = req->context;
+
+ pr_debug("audio_data_complete req->status %d req->actual %d\n",
+ req->status, req->actual);
+
+ audio_req_put(audio, req);
+
+ if (!audio->buffer_start || req->status)
+ return;
+
+ audio->period_offset += req->actual;
+ if (audio->period_offset >= audio->period) {
+ snd_pcm_period_elapsed(audio->substream);
+ audio->period_offset = 0;
+ }
+ audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+ u16 ep = le16_to_cpu(ctrl->wIndex);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_SET_CUR:
+ case UAC_SET_MIN:
+ case UAC_SET_MAX:
+ case UAC_SET_RES:
+ value = len;
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 *buf = cdev->req->buf;
+
+ pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+ switch (ctrl->bRequest) {
+ case UAC_GET_CUR:
+ case UAC_GET_MIN:
+ case UAC_GET_MAX:
+ case UAC_GET_RES:
+ /* return our sample rate */
+ buf[0] = (u8)SAMPLE_RATE;
+ buf[1] = (u8)(SAMPLE_RATE >> 8);
+ buf[2] = (u8)(SAMPLE_RATE >> 16);
+ value = 3;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything; interface
+ * activation uses set_alt().
+ */
+ switch (ctrl->bRequestType) {
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_set_endpoint_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_get_endpoint_req(f, ctrl);
+ break;
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ req->complete = audio_control_complete;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("audio response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct audio_dev *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+ if (ret)
+ return ret;
+
+ usb_ep_enable(audio->in_ep);
+ return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+ struct audio_dev *audio = func_to_audio(f);
+
+ pr_debug("audio_disable\n");
+ usb_ep_disable(audio->in_ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+ u8 *sam_freq;
+ int rate;
+
+ /* Set channel numbers */
+ input_terminal_desc.bNrChannels = 2;
+ as_type_i_desc.bNrChannels = 2;
+
+ /* Set sample rates */
+ rate = SAMPLE_RATE;
+ sam_freq = as_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+}
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct audio_dev *audio = func_to_audio(f);
+ int status;
+ struct usb_ep *ep;
+ struct usb_request *req;
+ int i;
+
+ audio_build_desc(audio);
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_interface_desc.bInterfaceNumber = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ as_interface_alt_0_desc.bInterfaceNumber = status;
+ as_interface_alt_1_desc.bInterfaceNumber = status;
+
+ status = -ENODEV;
+
+ /* allocate our endpoint */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->in_ep = ep;
+ ep->driver_data = audio; /* claim */
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ hs_as_in_ep_desc.bEndpointAddress =
+ fs_as_in_ep_desc.bEndpointAddress;
+
+ f->descriptors = fs_audio_desc;
+ f->hs_descriptors = hs_audio_desc;
+
+ for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+ req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+ if (req) {
+ req->context = audio;
+ req->complete = audio_data_complete;
+ audio_req_put(audio, req);
+ } else
+ status = -ENOMEM;
+ }
+
+fail:
+ return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct audio_dev *audio = func_to_audio(f);
+ struct usb_request *req;
+
+ while ((req = audio_req_get(audio)))
+ audio_request_free(req, audio->in_ep);
+
+ snd_card_free_when_closed(audio->card);
+ audio->card = NULL;
+ audio->pcm = NULL;
+ audio->substream = NULL;
+ audio->in_ep = NULL;
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+ audio->start_time = ktime_get();
+ audio->frames_sent = 0;
+ audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ audio->buffer_start = NULL;
+ audio->buffer_end = NULL;
+ audio->buffer_pos = NULL;
+ spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = substream->private_data;
+
+ runtime->private_data = audio;
+ runtime->hw = audio_hw_info;
+ snd_pcm_limit_hw_rates(runtime);
+ runtime->hw.channels_max = 2;
+
+ audio->substream = substream;
+ return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct audio_dev *audio = substream->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ audio->substream = NULL;
+ spin_unlock_irqrestore(&audio->lock, flags);
+
+ return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+
+ if (rate != SAMPLE_RATE)
+ return -EINVAL;
+ if (channels != 2)
+ return -EINVAL;
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = runtime->private_data;
+
+ audio->period = snd_pcm_lib_period_bytes(substream);
+ audio->period_offset = 0;
+ audio->buffer_start = runtime->dma_area;
+ audio->buffer_end = audio->buffer_start
+ + snd_pcm_lib_buffer_bytes(substream);
+ audio->buffer_pos = audio->buffer_start;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = runtime->private_data;
+ ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+ /* return offset of next frame to fill in our buffer */
+ return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ struct audio_dev *audio = substream->runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ audio_pcm_playback_start(audio);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ audio_pcm_playback_stop(audio);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct audio_dev _audio_dev = {
+ .func = {
+ .name = "audio_source",
+ .bind = audio_bind,
+ .unbind = audio_unbind,
+ .set_alt = audio_set_alt,
+ .setup = audio_setup,
+ .disable = audio_disable,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+ .idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+ .open = audio_pcm_open,
+ .close = audio_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = audio_pcm_hw_params,
+ .hw_free = audio_pcm_hw_free,
+ .prepare = audio_pcm_prepare,
+ .trigger = audio_pcm_playback_trigger,
+ .pointer = audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+ struct audio_source_config *config)
+{
+ struct audio_dev *audio;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ int err;
+
+ config->card = -1;
+ config->device = -1;
+
+ audio = &_audio_dev;
+
+ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
+ if (err)
+ return err;
+
+ snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+ err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+ if (err)
+ goto pcm_fail;
+ pcm->private_data = audio;
+ pcm->info_flags = 0;
+ audio->pcm = pcm;
+
+ strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ NULL, 0, 64 * 1024);
+
+ strlcpy(card->driver, "audio_source", sizeof(card->driver));
+ strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+ strlcpy(card->longname, "USB accessory audio source",
+ sizeof(card->longname));
+
+ err = snd_card_register(card);
+ if (err)
+ goto register_fail;
+
+ err = usb_add_function(c, &audio->func);
+ if (err)
+ goto add_fail;
+
+ config->card = pcm->card->number;
+ config->device = pcm->device;
+ audio->card = card;
+ return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+ snd_card_free(audio->card);
+ return err;
+}
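
As a rough sketch of how a composite gadget driver might consume the function above (the wrapper name is illustrative, not part of this patch; struct audio_source_config carries the int card/device fields that audio_source_bind_config() fills in):

static struct audio_source_config audio_config = {
	.card	= -1,
	.device	= -1,
};

static int audio_source_sample_bind(struct usb_configuration *c)
{
	int err = audio_source_bind_config(c, &audio_config);

	if (err)
		return err;
	/* userspace can now locate the ALSA device from these values */
	pr_info("audio_source: ALSA card %d, device %d\n",
		audio_config.card, audio_config.device);
	return 0;
}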
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index b1681e45aca7..72321fef65fd 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -820,7 +820,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
rndis_deregister(rndis->config);
rndis_exit();
- rndis_string_defs[0].id = 0;
if (gadget_is_superspeed(c->cdev->gadget))
usb_free_descriptors(f->ss_descriptors);
@@ -851,14 +850,14 @@ rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
if (!can_support_rndis(c) || !ethaddr)
return -EINVAL;
+ /* setup RNDIS itself */
+ status = rndis_init();
+ if (status < 0)
+ return status;
+
/* maybe allocate device-global string IDs */
if (rndis_string_defs[0].id == 0) {
- /* ... and setup RNDIS itself */
- status = rndis_init();
- if (status < 0)
- return status;
-
/* control interface label */
status = usb_string_id(c->cdev);
if (status < 0)
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index e4192b887de9..81451e976c70 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1127,11 +1127,15 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+static bool rndis_initialized;
int rndis_init(void)
{
u8 i;
+ if (rndis_initialized)
+ return 0;
+
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
char name [20];
@@ -1158,6 +1162,7 @@ int rndis_init(void)
INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
}
+ rndis_initialized = true;
return 0;
}
@@ -1166,7 +1171,13 @@ void rndis_exit(void)
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
u8 i;
char name[20];
+#endif
+ if (!rndis_initialized)
+ return;
+ rndis_initialized = false;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
sprintf(name, NAME_TEMPLATE, i);
remove_proc_entry(name, NULL);
diff --git a/include/linux/ion.h b/include/linux/ion.h
index aed8349279ed..4924a596dcdd 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -42,6 +42,18 @@ enum ion_heap_type {
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+/**
+ * heap flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached; ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be
+ created at mmap time; if this is
+ set, caches must be managed
+ manually */
+
#ifdef __KERNEL__
struct ion_device;
struct ion_heap;
@@ -87,6 +99,17 @@ struct ion_platform_data {
};
/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
 * ion_client_create() - allocate a client and return it
* @dev: the global ion device
* @heap_mask: mask of heaps this client can allocate from
@@ -110,14 +133,18 @@ void ion_client_destroy(struct ion_client *client);
* @len: size of the allocation
* @align: requested allocation alignment, lots of hardware blocks have
* alignment requirements of some kind
- * @flags: mask of heaps to allocate from, if multiple bits are set
+ * @heap_mask: mask of heaps to allocate from, if multiple bits are set
* heaps will be tried in order from lowest to highest order bit
+ * @flags: heap flags, the low 16 bits are consumed by ion, the high 16
+ * bits are passed on to the respective heap and can be
+ * heap-specific
*
* Allocate memory in one of the heaps provided in heap mask and return
* an opaque handle to it.
*/
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int flags);
+ size_t align, unsigned int heap_mask,
+ unsigned int flags);
/**
* ion_free - free a handle
@@ -138,7 +165,7 @@ void ion_free(struct ion_client *client, struct ion_handle *handle);
* This function queries the heap for a particular handle to get the
 * handle's physical address. Its output is only correct if
* a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_map_dma should be used
+ * this api should not be implemented -- ion_sg_table should be used
* instead. Returns -EINVAL if the handle is invalid. This has
* no implications on the reference counting of the handle --
* the returned value may not be valid if the caller is not
@@ -148,6 +175,17 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len);
/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
@@ -165,63 +203,23 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_map_dma - create a dma mapping for a given handle
+ * ion_share_dma_buf() - given an ion client, create a dma-buf fd
* @client: the client
- * @handle: handle to map
- *
- * Return an sglist describing the given handle
- */
-struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
- * ion_unmap_dma() - destroy a dma mapping for a handle
- * @client: the client
- * @handle: handle to unmap
+ * @handle: the handle
*/
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_share() - given a handle, obtain a buffer to pass to other clients
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
* @client: the client
- * @handle: the handle to share
- *
- * Given a handle, return a buffer, which exists in a global name
- * space, and can be passed to other clients. Should be passed into ion_import
- * to obtain a new handle for this buffer.
- *
- * NOTE: This function does do not an extra reference. The burden is on the
- * caller to make sure the buffer doesn't go away while it's being passed to
- * another client. That is, ion_free should not be called on this handle until
- * the buffer has been imported into the other client.
- */
-struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
- * ion_import() - given an buffer in another client, import it
- * @client: this blocks client
- * @buffer: the buffer to import (as obtained from ion_share)
+ * @fd: the dma-buf fd
*
- * Given a buffer, add it to the client and return the handle to use to refer
- * to it further. This is called to share a handle from one kernel client to
- * another.
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
*/
-struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer);
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
-/**
- * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
- * @client: this blocks client
- * @fd: the fd
- *
- * A helper function for drivers that will be recieving ion buffers shared
- * with them from userspace. These buffers are represented by a file
- * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
- * This function coverts that fd into the underlying buffer, and returns
- * the handle to use to refer to it further.
- */
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
#endif /* __KERNEL__ */
/**
@@ -236,6 +234,7 @@ struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
* struct ion_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @align: required alignment of the allocation
+ * @heap_mask: mask of heaps to allocate from
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to refer
* to this allocation
@@ -245,6 +244,7 @@ struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
struct ion_allocation_data {
size_t len;
size_t align;
+ unsigned int heap_mask;
unsigned int flags;
struct ion_handle *handle;
};
@@ -331,7 +331,17 @@ struct ion_custom_data {
* descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
 * field set to the corresponding opaque handle.
*/
-#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, int)
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma-buf APIs correctly (syncing
+ * happens automatically when the buffer is mapped to a device).
+ * If necessary, use it after touching a cached buffer from the CPU;
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
/**
* DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
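
A minimal kernel-client sketch of the reworked allocation path (assumes an existing struct ion_device *idev and the ION_HEAP_SYSTEM_MASK helper defined alongside the masks above; names are illustrative):

static int ion_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	struct sg_table *table;
	int fd;

	client = ion_client_create(idev, ION_HEAP_SYSTEM_MASK, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* heap_mask (where to allocate) is now separate from flags (how) */
	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	table = ion_sg_table(client, handle);	/* replaces ion_map_dma() */
	pr_debug("ion buffer has %u sg entries\n", table->nents);

	fd = ion_share_dma_buf(client, handle);	/* dma-buf fd for sharing */

	ion_free(client, handle);	/* the dma-buf fd keeps its own ref */
	ion_client_destroy(client);
	return fd;
}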
diff --git a/include/linux/platform_data/android_battery.h b/include/linux/platform_data/android_battery.h
new file mode 100644
index 000000000000..f6c8298fd885
--- /dev/null
+++ b/include/linux/platform_data/android_battery.h
@@ -0,0 +1,47 @@
+/*
+ * android_battery.h
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_ANDROID_BATTERY_H
+#define _LINUX_ANDROID_BATTERY_H
+
+enum {
+ CHARGE_SOURCE_NONE = 0,
+ CHARGE_SOURCE_AC,
+ CHARGE_SOURCE_USB,
+};
+
+struct android_bat_callbacks {
+ void (*charge_source_changed)
+ (struct android_bat_callbacks *, int);
+ void (*battery_set_full)(struct android_bat_callbacks *);
+};
+
+struct android_bat_platform_data {
+ void (*register_callbacks)(struct android_bat_callbacks *);
+ void (*unregister_callbacks)(void);
+ void (*set_charging_current) (int);
+ void (*set_charging_enable) (int);
+ int (*poll_charge_source) (void);
+ int (*get_capacity) (void);
+ int (*get_temperature) (int *);
+ int (*get_voltage_now)(void);
+ int (*get_current_now)(int *);
+
+ int temp_high_threshold;
+ int temp_high_recovery;
+ int temp_low_recovery;
+ int temp_low_threshold;
+
+ unsigned long full_charging_time;
+ unsigned long recharging_time;
+ unsigned int recharging_voltage;
+};
+
+#endif
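
A board-file sketch of supplying this platform data (the "android-battery" device name and the tenth-of-a-degree temperature units are assumptions about drivers/power/android_battery.c, not guarantees of this header):

#include <linux/platform_device.h>
#include <linux/platform_data/android_battery.h>

static int board_poll_charge_source(void)
{
	/* query the charger IC or PMIC here */
	return CHARGE_SOURCE_USB;
}

static struct android_bat_platform_data board_bat_pdata = {
	.poll_charge_source	= board_poll_charge_source,
	.temp_high_threshold	= 600,	/* assumed 0.1 degC units */
	.temp_high_recovery	= 420,
	.temp_low_recovery	= 0,
	.temp_low_threshold	= -50,
};

static struct platform_device board_bat_device = {
	.name			= "android-battery",	/* assumed name */
	.id			= -1,
	.dev.platform_data	= &board_bat_pdata,
};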
diff --git a/include/linux/sync.h b/include/linux/sync.h
index 4f1993871467..38ea986dc70f 100644
--- a/include/linux/sync.h
+++ b/include/linux/sync.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#ifdef __KERNEL__
+#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -39,14 +40,14 @@ struct sync_fence;
 * -1 if a will signal before b
* @free_pt: called before sync_pt is freed
* @release_obj: called before sync_timeline is freed
- * @print_obj: print aditional debug information about sync_timeline.
- * should not print a newline
- * @print_pt: print aditional debug information about sync_pt.
- * should not print a newline
+ * @print_obj: deprecated
+ * @print_pt: deprecated
 * @fill_driver_data: write implementation-specific driver data to data.
* should return an error if there is not enough room
* as specified by size. This information is returned
* to userspace by SYNC_IOC_FENCE_INFO.
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter
+ * @pt_value_str: fill str with the value of the sync_pt
*/
struct sync_timeline_ops {
const char *driver_name;
@@ -66,19 +67,27 @@ struct sync_timeline_ops {
/* optional */
void (*release_obj)(struct sync_timeline *sync_timeline);
- /* optional */
+ /* deprecated */
void (*print_obj)(struct seq_file *s,
struct sync_timeline *sync_timeline);
- /* optional */
+ /* deprecated */
void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
/* optional */
int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+
+ /* optional */
+ void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
+ int size);
+
+ /* optional */
+ void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
};
/**
* struct sync_timeline - sync object
+ * @kref: reference count on the timeline.
 * @ops: ops that define the implementation of the sync_timeline
 * @name: name of the sync_timeline. Useful for debugging
 * @destroyed: set when sync_timeline is destroyed
@@ -89,6 +98,7 @@ struct sync_timeline_ops {
* @sync_timeline_list: membership in global sync_timeline_list
*/
struct sync_timeline {
+ struct kref kref;
const struct sync_timeline_ops *ops;
char name[32];
@@ -109,6 +119,7 @@ struct sync_timeline {
* @parent: sync_timeline to which this sync_pt belongs
* @child_list: membership in sync_timeline.child_list_head
* @active_list: membership in sync_timeline.active_list_head
+ * @signaled_list: membership in temporary signaled_list on stack
* @fence: sync_fence to which the sync_pt belongs
* @pt_list: membership in sync_fence.pt_list_head
* @status: 1: signaled, 0:active, <0: error
@@ -120,6 +131,7 @@ struct sync_pt {
struct list_head child_list;
struct list_head active_list;
+ struct list_head signaled_list;
struct sync_fence *fence;
struct list_head pt_list;
@@ -133,6 +145,7 @@ struct sync_pt {
/**
* struct sync_fence - sync fence
* @file: file representing this fence
+ * @kref: reference count on fence.
* @name: name of sync_fence. Useful for debugging
 * @pt_list_head: list of sync_pts in this fence. immutable once fence
* is created
@@ -145,6 +158,7 @@ struct sync_pt {
*/
struct sync_fence {
struct file *file;
+ struct kref kref;
char name[32];
/* this list is immutable once the fence is created */
@@ -159,6 +173,10 @@ struct sync_fence {
struct list_head sync_fence_list;
};
+struct sync_fence_waiter;
+typedef void (*sync_callback_t)(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
/**
* struct sync_fence_waiter - metadata for asynchronous waiter on a fence
* @waiter_list: membership in sync_fence.waiter_list_head
@@ -168,10 +186,15 @@ struct sync_fence {
struct sync_fence_waiter {
struct list_head waiter_list;
- void (*callback)(struct sync_fence *fence, void *data);
- void *callback_data;
+ sync_callback_t callback;
};
+static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
+ sync_callback_t callback)
+{
+ waiter->callback = callback;
+}
+
/*
* API for sync_timeline implementers
*/
@@ -284,24 +307,37 @@ void sync_fence_install(struct sync_fence *fence, int fd);
/**
 * sync_fence_wait_async() - registers an async wait on the fence
* @fence: fence to wait on
- * @callback: callback
- * @callback_data data to pass to the callback
+ * @waiter: waiter callback struct
*
* Returns 1 if @fence has already signaled.
*
- * Registers a callback to be called when @fence signals or has an error
+ * Registers a callback to be called when @fence signals or has an error.
+ * @waiter should be initialized with sync_fence_waiter_init().
*/
int sync_fence_wait_async(struct sync_fence *fence,
- void (*callback)(struct sync_fence *, void *data),
- void *callback_data);
+ struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_cancel_async() - cancels an async wait
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * returns 0 if waiter was removed from fence's async waiter list.
+ * returns -ENOENT if waiter was not found on fence's async waiter list.
+ *
+ * Cancels a previously registered async wait. Will fail gracefully if
+ * @waiter was never registered or if @fence has already signaled @waiter.
+ */
+int sync_fence_cancel_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
/**
* sync_fence_wait() - wait on fence
* @fence: fence to wait on
 * @timeout: timeout in ms
*
- * Wait for @fence to be signaled or have an error. Waits indefintly
- * if @timeout = 0
+ * Wait for @fence to be signaled or have an error. Waits indefinitely
+ * if @timeout < 0
*/
int sync_fence_wait(struct sync_fence *fence, long timeout);
@@ -360,9 +396,9 @@ struct sync_fence_info_data {
/**
* DOC: SYNC_IOC_WAIT - wait for a fence to signal
*
- * pass timeout in milliseconds.
+ * pass timeout in milliseconds. Waits indefinitely if timeout < 0.
*/
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __u32)
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
/**
* DOC: SYNC_IOC_MERGE - merge two fences
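
To illustrate the reworked async-wait API: callers now embed struct sync_fence_waiter in their own state and recover it with container_of() in the callback (a sketch; struct and function names are illustrative):

struct flip_work {
	struct sync_fence_waiter waiter;
	struct sync_fence *fence;
};

static void flip_fence_cb(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	struct flip_work *work =
		container_of(waiter, struct flip_work, waiter);

	/* the fence signaled (or errored); run the deferred work */
}

static void flip_queue(struct flip_work *work)
{
	sync_fence_waiter_init(&work->waiter, flip_fence_cb);
	/* sync_fence_wait_async() returns 1 if already signaled */
	if (sync_fence_wait_async(work->fence, &work->waiter) == 1)
		flip_fence_cb(work->fence, &work->waiter);
}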
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
index 5b2dcf9728e1..61ebe0aabc5b 100644
--- a/include/linux/usb/f_accessory.h
+++ b/include/linux/usb/f_accessory.h
@@ -36,13 +36,15 @@
#define ACCESSORY_STRING_URI 4
#define ACCESSORY_STRING_SERIAL 5
-/* Control request for retrieving device's protocol version (currently 1)
+/* Control request for retrieving device's protocol version
*
* requestType: USB_DIR_IN | USB_TYPE_VENDOR
* request: ACCESSORY_GET_PROTOCOL
* value: 0
* index: 0
* data version number (16 bits little endian)
+ * 1 for original accessory support
+ * 2 adds HID and device to host audio support
*/
#define ACCESSORY_GET_PROTOCOL 51
@@ -70,6 +72,65 @@
*/
#define ACCESSORY_START 53
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_REGISTER_HID
+ * value: Accessory assigned ID for the HID device
+ * index: total length of the HID report descriptor
+ * data none
+ */
+#define ACCESSORY_REGISTER_HID 54
+
+/* Control request for unregistering a HID device.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_UNREGISTER_HID
+ * value: Accessory assigned ID for the HID device
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_UNREGISTER_HID 55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SET_HID_REPORT_DESC
+ * value: Accessory assigned ID for the HID device
+ * index: offset of data in descriptor
+ * (needed when HID descriptor is too big for one packet)
+ * data the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC 56
+
+/* Control request for sending HID events.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SEND_HID_EVENT
+ * value: Accessory assigned ID for the HID device
+ * index: 0
+ * data the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT 57
+
+/* Control request for setting the audio mode.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SET_AUDIO_MODE
+ * value: 0 - no audio
+ * 1 - device to host, 44100 16-bit stereo PCM
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_SET_AUDIO_MODE 58
+
/* ioctls for retrieving strings set by the host */
#define ACCESSORY_GET_STRING_MANUFACTURER _IOW('M', 1, char[256])
#define ACCESSORY_GET_STRING_MODEL _IOW('M', 2, char[256])
@@ -79,5 +140,7 @@
#define ACCESSORY_GET_STRING_SERIAL _IOW('M', 6, char[256])
/* returns 1 if there is a start request pending */
#define ACCESSORY_IS_START_REQUESTED _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE _IO('M', 8)
#endif /* __LINUX_USB_F_ACCESSORY_H */
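
For reference, a host-side sketch of driving the new HID requests with libusb-1.0 (libusb is not part of this patch; the accessory-assigned ID and single-packet descriptor transfer are illustrative):

#include <libusb-1.0/libusb.h>

static int aoa_register_hid(libusb_device_handle *dev, uint16_t id,
			    unsigned char *desc, uint16_t desc_len)
{
	int ret;

	ret = libusb_control_transfer(dev,
			LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
			ACCESSORY_REGISTER_HID, id, desc_len,
			NULL, 0, 1000);
	if (ret < 0)
		return ret;

	/* index is the byte offset into the descriptor; 0 is enough
	 * when it fits in a single control transfer */
	return libusb_control_transfer(dev,
			LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
			ACCESSORY_SET_HID_REPORT_DESC, id, 0,
			desc, desc_len, 1000);
}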
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
index ba0634e2f9d7..b2ce6b099da6 100644
--- a/include/trace/events/cpufreq_interactive.h
+++ b/include/trace/events/cpufreq_interactive.h
@@ -36,44 +36,50 @@ DEFINE_EVENT(set, cpufreq_interactive_setspeed,
DECLARE_EVENT_CLASS(loadeval,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq),
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
TP_STRUCT__entry(
__field(unsigned long, cpu_id )
__field(unsigned long, load )
- __field(unsigned long, curfreq )
- __field(unsigned long, targfreq )
+ __field(unsigned long, curtarg )
+ __field(unsigned long, curactual )
+ __field(unsigned long, newtarg )
),
TP_fast_assign(
__entry->cpu_id = cpu_id;
__entry->load = load;
- __entry->curfreq = curfreq;
- __entry->targfreq = targfreq;
+ __entry->curtarg = curtarg;
+ __entry->curactual = curactual;
+ __entry->newtarg = newtarg;
),
- TP_printk("cpu=%lu load=%lu cur=%lu targ=%lu",
- __entry->cpu_id, __entry->load, __entry->curfreq,
- __entry->targfreq)
+ TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curtarg,
+ __entry->curactual, __entry->newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_target,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_already,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
TRACE_EVENT(cpufreq_interactive_boost,
diff --git a/include/trace/events/sync.h b/include/trace/events/sync.h
new file mode 100644
index 000000000000..f31bc63ca65d
--- /dev/null
+++ b/include/trace/events/sync.h
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sync
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include <linux/sync.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+ TP_PROTO(struct sync_timeline *timeline),
+
+ TP_ARGS(timeline),
+
+ TP_STRUCT__entry(
+ __string(name, timeline->name)
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, timeline->name);
+ if (timeline->ops->timeline_value_str) {
+ timeline->ops->timeline_value_str(timeline,
+ __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+);
+
+TRACE_EVENT(sync_wait,
+ TP_PROTO(struct sync_fence *fence, int begin),
+
+ TP_ARGS(fence, begin),
+
+ TP_STRUCT__entry(
+ __string(name, fence->name)
+ __field(s32, status)
+ __field(u32, begin)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, fence->name);
+ __entry->status = fence->status;
+ __entry->begin = begin;
+ ),
+
+ TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
+ __get_str(name), __entry->status)
+);
+
+TRACE_EVENT(sync_pt,
+ TP_PROTO(struct sync_pt *pt),
+
+ TP_ARGS(pt),
+
+ TP_STRUCT__entry(
+ __string(timeline, pt->parent->name)
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(timeline, pt->parent->name);
+ if (pt->parent->ops->pt_value_str) {
+ pt->parent->ops->pt_value_str(pt,
+ __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
+ );
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
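
A sketch of the driver side these events rely on: a timeline implementing the optional value callbacks so the trace output carries a readable counter (field names are illustrative; sw_sync follows the same pattern):

struct my_timeline {
	struct sync_timeline obj;	/* must be first */
	u32 value;
};

static void my_timeline_value_str(struct sync_timeline *timeline,
				  char *str, int size)
{
	struct my_timeline *t = container_of(timeline,
					     struct my_timeline, obj);

	snprintf(str, size, "%d", t->value);
}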
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8046acce74d7..c1e75a9b34e0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -196,11 +196,10 @@ void update_packing_domain(int cpu)
while (sd) {
struct sched_group *sg = sd->groups;
struct sched_group *pack = sg;
- struct sched_group *tmp = sg->next;
+ struct sched_group *tmp;
- /* 1st CPU of the sched domain is a good candidate */
- if (id == -1)
- id = cpumask_first(sched_domain_span(sd));
+ /* The 1st CPU of the local group is a good candidate */
+ id = cpumask_first(sched_group_cpus(pack));
/* Find sched group of candidate */
tmp = sd->groups;
@@ -215,16 +214,22 @@ void update_packing_domain(int cpu)
tmp = sg->next;
/* loop the sched groups to find the best one */
- while (tmp != sg) {
- if (tmp->sgp->power * sg->group_weight <
- sg->sgp->power * tmp->group_weight)
- pack = tmp;
- tmp = tmp->next;
- }
+ for (tmp = sg->next; tmp != sg; tmp = tmp->next) {
+ if (tmp->sgp->power * pack->group_weight >
+ pack->sgp->power * tmp->group_weight)
+ continue;
+
+ if ((tmp->sgp->power * pack->group_weight ==
+ pack->sgp->power * tmp->group_weight)
+ && (cpumask_first(sched_group_cpus(tmp)) >= id))
+ continue;
+
+ /* we have found a better group */
+ pack = tmp;
- /* we have found a better group */
- if (pack != sg)
+ /* Take the 1st CPU of the new group */
id = cpumask_first(sched_group_cpus(pack));
+ }
/* Look for another CPU than itself */
if ((id != cpu)
@@ -3635,6 +3640,80 @@ static int hmp_attr_init(void)
}
late_initcall(hmp_attr_init);
#endif /* CONFIG_HMP_VARIABLE_SCALE */
+
+static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
+ int *min_cpu)
+{
+ int cpu;
+ int min_load = INT_MAX;
+ int min_cpu_temp = NR_CPUS;
+
+ for_each_cpu_mask(cpu, hmpd->cpus) {
+ if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) {
+ min_load = cpu_rq(cpu)->cfs.tg_load_contrib;
+ min_cpu_temp = cpu;
+ }
+ }
+
+ if (min_cpu)
+ *min_cpu = min_cpu_temp;
+
+ return min_load;
+}
+
+/*
+ * Calculate the task starvation.
+ * This is the ratio of actual running time vs. runnable time.
+ * If the two are equal the task is getting the cpu time it needs or
+ * it is alone on the cpu and the cpu is fully utilized.
+ */
+static inline unsigned int hmp_task_starvation(struct sched_entity *se)
+{
+ u32 starvation;
+
+ starvation = se->avg.usage_avg_sum * scale_load_down(NICE_0_LOAD);
+ starvation /= (se->avg.runnable_avg_sum + 1);
+
+ return scale_load(starvation);
+}
+
+static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
+{
+ int min_usage;
+ int dest_cpu = NR_CPUS;
+
+ if (hmp_cpu_is_slowest(cpu))
+ return NR_CPUS;
+
+ /* Is the current domain fully loaded? */
+ /* load < ~94% */
+ min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
+ if (min_usage < NICE_0_LOAD-64)
+ return NR_CPUS;
+
+ /* Is the cpu oversubscribed? */
+ /* load < ~194% */
+ if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64)
+ return NR_CPUS;
+
+ /* Is the task alone on the cpu? */
+ if (cpu_rq(cpu)->cfs.nr_running < 2)
+ return NR_CPUS;
+
+ /* Is the task actually starving? */
+ if (hmp_task_starvation(se) > 768) /* <25% waiting */
+ return NR_CPUS;
+
+ /* Does the slower domain have spare cycles? */
+ min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
+ /* load > 50% */
+ if (min_usage > NICE_0_LOAD/2)
+ return NR_CPUS;
+
+ if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
+ return dest_cpu;
+ return NR_CPUS;
+}
#endif /* CONFIG_SCHED_HMP */
static inline bool is_buddy_busy(int cpu)
@@ -6098,10 +6177,14 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
< hmp_next_up_threshold)
return 0;
- if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
- tsk_cpus_allowed(p))
- && se->avg.load_avg_ratio > hmp_up_threshold) {
- return 1;
+ if (se->avg.load_avg_ratio > hmp_up_threshold) {
+ /* Target domain load < ~94% */
+ if (hmp_domain_min_load(hmp_faster_domain(cpu), NULL)
+ > NICE_0_LOAD-64)
+ return 0;
+ if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
+ tsk_cpus_allowed(p)))
+ return 1;
}
return 0;
}
@@ -6324,6 +6407,21 @@ static void hmp_force_up_migration(int this_cpu)
hmp_next_up_delay(&p->se, target->push_cpu);
}
}
+ if (!force && !target->active_balance) {
+ /*
+ * For now we just check the currently running task.
+ * Selecting the lightest task for offloading will
+ * require extensive bookkeeping.
+ */
+ target->push_cpu = hmp_offload_down(cpu, curr);
+ if (target->push_cpu < NR_CPUS) {
+ target->active_balance = 1;
+ target->migrate_task = p;
+ force = 1;
+ trace_sched_hmp_migrate(p, target->push_cpu, 2);
+ hmp_next_down_delay(&p->se, target->push_cpu);
+ }
+ }
raw_spin_unlock_irqrestore(&target->lock, flags);
if (force)
stop_one_cpu_nowait(cpu_of(target),
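
For concreteness, the thresholds used by hmp_offload_down() above work out as follows (assuming NICE_0_LOAD == 1024):

/*
 * min_usage >= NICE_0_LOAD - 64         -> 960/1024, domain ~94% busy
 * tg_load_contrib >= 2*NICE_0_LOAD - 64 -> 1984/1024, cpu ~194% subscribed
 * starvation <= 768                     -> task runs <= 75% of its runnable
 *                                          time, i.e. waits >= 25%
 * slower min_usage <= NICE_0_LOAD / 2   -> 512/1024, target <= 50% loaded
 */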
diff --git a/linaro/configs/linaro-base.conf b/linaro/configs/linaro-base.conf
index eaf0196a127f..23638c3d214b 100644
--- a/linaro/configs/linaro-base.conf
+++ b/linaro/configs/linaro-base.conf
@@ -87,3 +87,4 @@ CONFIG_CRC7=y
CONFIG_HW_PERF_EVENTS=y
CONFIG_FUNCTION_TRACER=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_PROC_DEVICETREE=y
diff --git a/linaro/configs/omap4.conf b/linaro/configs/omap4.conf
index b20f5454bcd5..1cb9960dbb2b 100644
--- a/linaro/configs/omap4.conf
+++ b/linaro/configs/omap4.conf
@@ -145,6 +145,8 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_TWL4030=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
CONFIG_VIRTIO_MMIO=y
CONFIG_STAGING=y
CONFIG_DRM_OMAP=y
diff --git a/linaro/configs/ubuntu.conf b/linaro/configs/ubuntu.conf
index fafabe1ceb57..b65be649bb18 100644
--- a/linaro/configs/ubuntu.conf
+++ b/linaro/configs/ubuntu.conf
@@ -721,7 +721,6 @@ CONFIG_MTD_LPDDR=m
CONFIG_MTD_QINFO_PROBE=m
CONFIG_DTC=y
CONFIG_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_OF_FLATTREE=y
CONFIG_OF_EARLY_FLATTREE=y
CONFIG_OF_ADDRESS=y
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index f6d4cfc05f3c..223fa8be8e85 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1461,6 +1461,8 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
* - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
*/
new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+ if (!new_tag_stat)
+ goto unlock;
uid_tag_counters = &new_tag_stat->counters;
} else {
uid_tag_counters = &tag_stat_entry->counters;
@@ -1469,6 +1471,8 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
if (acct_tag) {
/* Create the child {acct_tag, uid_tag} and hook up parent. */
new_tag_stat = create_if_tag_stat(iface_entry, tag);
+ if (!new_tag_stat)
+ goto unlock;
new_tag_stat->parent_counters = uid_tag_counters;
} else {
/*
@@ -1482,6 +1486,7 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
BUG_ON(!new_tag_stat);
}
tag_stat_update(new_tag_stat, direction, proto, bytes);
+unlock:
spin_unlock_bh(&iface_entry->tag_stat_list_lock);
}
@@ -2588,8 +2593,9 @@ static int pp_stats_line(struct proc_print_info *ppi, int cnt_set)
} else {
tag_t tag = ppi->ts_entry->tn.tag;
uid_t stat_uid = get_uid_from_tag(tag);
-
- if (!can_read_other_uid_stats(stat_uid)) {
+ /* Detailed tags are not available to everybody */
+ if (get_atag_from_tag(tag)
+ && !can_read_other_uid_stats(stat_uid)) {
CT_DEBUG("qtaguid: stats line: "
"%s 0x%llx %u: insufficient priv "
"from pid=%u tgid=%u uid=%u\n",
@@ -2751,7 +2757,7 @@ static int qtudev_open(struct inode *inode, struct file *file)
utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
if (IS_ERR_OR_NULL(utd_entry)) {
res = PTR_ERR(utd_entry);
- goto err;
+ goto err_unlock;
}
/* Look for existing PID based proc_data */
@@ -2793,8 +2799,8 @@ err_unlock_free_utd:
rb_erase(&utd_entry->node, &uid_tag_data_tree);
kfree(utd_entry);
}
+err_unlock:
spin_unlock_bh(&uid_tag_data_tree_lock);
-err:
return res;
}
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
index 3a9c1f9475ce..8163f370d04b 100644
--- a/net/netfilter/xt_quota2.c
+++ b/net/netfilter/xt_quota2.c
@@ -350,14 +350,15 @@ static struct xt_match quota_mt2_reg[] __read_mostly = {
static int __init quota_mt2_init(void)
{
int ret;
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
struct netlink_kernel_cfg cfg = {
.groups = 1,
};
+#endif
pr_debug("xt_quota2: init()");
#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
- nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
- THIS_MODULE, &cfg);
+ nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
if (!nflognl)
return -ENOMEM;
#endif
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index 974d5cb7e30a..05274fccb88e 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -32,11 +32,13 @@ usage() {
echo " -m only merge the fragments, do not execute the make command"
echo " -n use allnoconfig instead of alldefconfig"
echo " -r list redundant entries when merging fragments"
+ echo " -O dir to put generated output files"
}
MAKE=true
ALLTARGET=alldefconfig
WARNREDUN=false
+OUTPUT=.
while true; do
case $1 in
@@ -59,6 +61,16 @@ while true; do
shift
continue
;;
+ "-O")
+ if [ -d $2 ]; then
+ OUTPUT=$(echo $2 | sed 's/\/*$//')
+ else
+ echo "output directory $2 does not exist" 1>&2
+ exit 1
+ fi
+ shift 2
+ continue
+ ;;
*)
break
;;
@@ -100,9 +112,9 @@ for MERGE_FILE in $MERGE_LIST ; do
done
if [ "$MAKE" = "false" ]; then
- cp $TMP_FILE .config
+ cp $TMP_FILE $OUTPUT/.config
echo "#"
- echo "# merged configuration written to .config (needs make)"
+ echo "# merged configuration written to $OUTPUT/.config (needs make)"
echo "#"
clean_up
exit
@@ -111,14 +123,14 @@ fi
# Use the merged file as the starting point for:
# alldefconfig: Fills in any missing symbols with Kconfig default
# allnoconfig: Fills in any missing symbols with # CONFIG_* is not set
-make KCONFIG_ALLCONFIG=$TMP_FILE $ALLTARGET
+make KCONFIG_ALLCONFIG=$TMP_FILE O=$OUTPUT $ALLTARGET
# Check all specified config values took (might have missed-dependency issues)
for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
- ACTUAL_VAL=$(grep -w -e "$CFG" .config)
+ ACTUAL_VAL=$(grep -w -e "$CFG" $OUTPUT/.config)
if [ "x$REQUESTED_VAL" != "x$ACTUAL_VAL" ] ; then
echo "Value requested for $CFG not in final .config"
echo "Requested value: $REQUESTED_VAL"
diff --git a/tools/perf/compat-android.h b/tools/perf/compat-android.h
index b8fb93679329..fce9573d257b 100644
--- a/tools/perf/compat-android.h
+++ b/tools/perf/compat-android.h
@@ -20,13 +20,35 @@
#ifndef _COMPAT_ANDROID_H_
#define _COMPAT_ANDROID_H_ 1
+/* Stuff Bionic assumes to be present, but that doesn't exist
+ * anymore after the uapi kernel header reorg
+ */
+#include <stdint.h>
+#include <stdbool.h>
+typedef unsigned short __kernel_nlink_t;
+typedef intptr_t phys_addr_t;
+#include <linux/types.h>
+typedef uint32_t u32;
+typedef uint64_t u64;
+#ifndef CONFIG_DRAM_BASEUL
+#ifdef CONFIG_DRAM_BASE
+#define CONFIG_DRAM_BASEUL UL(CONFIG_DRAM_BASE)
+#else
+#define CONFIG_DRAM_BASEUL 0
+#endif
+#endif
+#define __deprecated
+#include <asm-generic/bitsperlong.h>
+
+#undef BITS_PER_LONG /* Something seems to define this incorrectly */
+#define BITS_PER_LONG _BITSIZE
+
#include <stdio.h>
#include <signal.h>
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/termios.h> /* for winsize */
#ifndef __WORDSIZE
-#include <stdint.h>
#define __WORDSIZE _BITSIZE
#endif
@@ -42,6 +64,11 @@
#define __le32 uint32_t
#endif
+#ifndef FD_SET
+#define FD_SET(fd, fdsetp) (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] |= (1<<((fd) & 31)))
+#define FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof (*(fd_set *)(fdsetp))))
+#endif
+
/* Assorted functions that are missing from Bionic */
/* Android prior to 4.2 lacks psignal().
* What we're doing here is fairly evil - but necessary since
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
index 3322b8446e89..cd543418c721 100644
--- a/tools/perf/util/pager.c
+++ b/tools/perf/util/pager.c
@@ -1,6 +1,7 @@
#include "cache.h"
#include "run-command.h"
#include "sigchain.h"
+#include <sys/select.h>
/*
* This is split up from the rest of git so that we can do
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 10e8a478ae4c..ebee70529cdc 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -253,11 +253,13 @@ void event_attr_init(struct perf_event_attr *attr);
* *not* considered a power of two.
*/
+#ifndef _LINUX_LOG2_H // defined there too
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
+#endif
size_t hex_width(u64 v);