Diffstat (limited to 'drivers/staging')
50 files changed, 7281 insertions, 5911 deletions
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index c0c95be0f969..b3c054e4fd58 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -8,17 +8,6 @@ config ANDROID if ANDROID -config ANDROID_BINDER_IPC - bool "Android Binder IPC Driver" - default n - ---help--- - Binder is used in Android for both communication between processes, - and remote method invocation. - - This means one Android process can call a method/routine in another - Android process, using Binder to identify, invoke and pass arguments - between said processes. - config ASHMEM bool "Enable the Anonymous Shared Memory Subsystem" default n @@ -31,23 +20,6 @@ config ASHMEM It is, in theory, a good memory allocator for low-memory devices, because it can discard shared memory units when under memory pressure. -config ANDROID_LOGGER - tristate "Android log driver" - default n - ---help--- - This adds support for system-wide logging using four log buffers. - - These are: - - 1: main - 2: events - 3: radio - 4: system - - Log reading and writing is performed via normal Linux reads and - optimized writes. This optimization avoids logging having too - much overhead in the system. - config ANDROID_TIMED_OUTPUT bool "Timed output class driver" default y @@ -63,14 +35,14 @@ config ANDROID_LOW_MEMORY_KILLER ---help--- Registers processes to be killed when memory is low -config ANDROID_INTF_ALARM_DEV - bool "Android alarm driver" - depends on RTC_CLASS - default n +config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES + bool "Android Low Memory Killer: detect oom_adj values" + depends on ANDROID_LOW_MEMORY_KILLER + default y ---help--- - Provides non-wakeup and rtc backed wakeup alarms based on rtc or - elapsed realtime, and a non-wakeup alarm on the monotonic clock. - Also exports the alarm interface to user-space. + Detect oom_adj values written to + /sys/module/lowmemorykiller/parameters/adj and convert them + to oom_score_adj values. config SYNC bool "Synchronization framework" @@ -99,6 +71,10 @@ config SW_SYNC_USER *WARNING* improper use of this can result in deadlocking kernel drivers from userspace. 
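Note on the new ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES option above: older Android userspace writes legacy oom_adj values (-17..15) into /sys/module/lowmemorykiller/parameters/adj, while the kernel now scores tasks on the oom_score_adj scale (-1000..1000). A minimal sketch of the linear conversion this option performs, assuming the OOM_DISABLE, OOM_ADJUST_MAX and OOM_SCORE_ADJ_MAX constants from linux/oom.h (the helper name here is illustrative):

static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
{
	/* oom_adj == 15 means "always killable" on both scales */
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;
	/* scale -17..14 linearly onto -1000..823 */
	return (short)((oom_adj * (long)OOM_SCORE_ADJ_MAX) / -OOM_DISABLE);
}

With this, a written adj of 0 stays 0 and -17 (OOM_DISABLE) maps to -1000, so existing init scripts keep working unmodified.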
+source "drivers/staging/android/ion/Kconfig" + +source "drivers/staging/android/fiq_debugger/Kconfig" + endif # if ANDROID endmenu diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index c136299e05af..30ba2f0cb084 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,11 +1,11 @@ ccflags-y += -I$(src) # needed for trace events -obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o +obj-y += ion/ +obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/ + obj-$(CONFIG_ASHMEM) += ashmem.o -obj-$(CONFIG_ANDROID_LOGGER) += logger.o obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o -obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o obj-$(CONFIG_SYNC) += sync.o obj-$(CONFIG_SW_SYNC) += sw_sync.o diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO deleted file mode 100644 index b15fb0d6b152..000000000000 --- a/drivers/staging/android/TODO +++ /dev/null @@ -1,10 +0,0 @@ -TODO: - - checkpatch.pl cleanups - - sparse fixes - - rename files to be not so "generic" - - make sure things build as modules properly - - add proper arch dependencies as needed - - audit userspace interfaces to make sure they are sane - -Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: -Brian Swetland <swetland@google.com> diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c deleted file mode 100644 index 6dc27dac679d..000000000000 --- a/drivers/staging/android/alarm-dev.c +++ /dev/null @@ -1,443 +0,0 @@ -/* drivers/rtc/alarm-dev.c - * - * Copyright (C) 2007-2009 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include <linux/time.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/miscdevice.h> -#include <linux/fs.h> -#include <linux/platform_device.h> -#include <linux/sched.h> -#include <linux/spinlock.h> -#include <linux/uaccess.h> -#include <linux/alarmtimer.h> -#include "android_alarm.h" - -#define ANDROID_ALARM_PRINT_INFO (1U << 0) -#define ANDROID_ALARM_PRINT_IO (1U << 1) -#define ANDROID_ALARM_PRINT_INT (1U << 2) - -static int debug_mask = ANDROID_ALARM_PRINT_INFO; -module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); - -#define alarm_dbg(debug_level_mask, fmt, ...) 
\ -do { \ - if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) \ - pr_info(fmt, ##__VA_ARGS__); \ -} while (0) - -#define ANDROID_ALARM_WAKEUP_MASK ( \ - ANDROID_ALARM_RTC_WAKEUP_MASK | \ - ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK) - -static int alarm_opened; -static DEFINE_SPINLOCK(alarm_slock); -static struct wakeup_source alarm_wake_lock; -static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue); -static uint32_t alarm_pending; -static uint32_t alarm_enabled; -static uint32_t wait_pending; - -struct devalarm { - union { - struct hrtimer hrt; - struct alarm alrm; - } u; - enum android_alarm_type type; -}; - -static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT]; - - -static int is_wakeup(enum android_alarm_type type) -{ - return (type == ANDROID_ALARM_RTC_WAKEUP || - type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP); -} - - -static void devalarm_start(struct devalarm *alrm, ktime_t exp) -{ - if (is_wakeup(alrm->type)) - alarm_start(&alrm->u.alrm, exp); - else - hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS); -} - - -static int devalarm_try_to_cancel(struct devalarm *alrm) -{ - if (is_wakeup(alrm->type)) - return alarm_try_to_cancel(&alrm->u.alrm); - return hrtimer_try_to_cancel(&alrm->u.hrt); -} - -static void devalarm_cancel(struct devalarm *alrm) -{ - if (is_wakeup(alrm->type)) - alarm_cancel(&alrm->u.alrm); - else - hrtimer_cancel(&alrm->u.hrt); -} - -static void alarm_clear(enum android_alarm_type alarm_type) -{ - uint32_t alarm_type_mask = 1U << alarm_type; - unsigned long flags; - - spin_lock_irqsave(&alarm_slock, flags); - alarm_dbg(IO, "alarm %d clear\n", alarm_type); - devalarm_try_to_cancel(&alarms[alarm_type]); - if (alarm_pending) { - alarm_pending &= ~alarm_type_mask; - if (!alarm_pending && !wait_pending) - __pm_relax(&alarm_wake_lock); - } - alarm_enabled &= ~alarm_type_mask; - spin_unlock_irqrestore(&alarm_slock, flags); - -} - -static void alarm_set(enum android_alarm_type alarm_type, - struct timespec *ts) -{ - uint32_t alarm_type_mask = 1U << alarm_type; - unsigned long flags; - - spin_lock_irqsave(&alarm_slock, flags); - alarm_dbg(IO, "alarm %d set %ld.%09ld\n", - alarm_type, ts->tv_sec, ts->tv_nsec); - alarm_enabled |= alarm_type_mask; - devalarm_start(&alarms[alarm_type], timespec_to_ktime(*ts)); - spin_unlock_irqrestore(&alarm_slock, flags); -} - -static int alarm_wait(void) -{ - unsigned long flags; - int rv = 0; - - spin_lock_irqsave(&alarm_slock, flags); - alarm_dbg(IO, "alarm wait\n"); - if (!alarm_pending && wait_pending) { - __pm_relax(&alarm_wake_lock); - wait_pending = 0; - } - spin_unlock_irqrestore(&alarm_slock, flags); - - rv = wait_event_interruptible(alarm_wait_queue, alarm_pending); - if (rv) - return rv; - - spin_lock_irqsave(&alarm_slock, flags); - rv = alarm_pending; - wait_pending = 1; - alarm_pending = 0; - spin_unlock_irqrestore(&alarm_slock, flags); - - return rv; -} - -static int alarm_set_rtc(struct timespec *ts) -{ - struct rtc_time new_rtc_tm; - struct rtc_device *rtc_dev; - unsigned long flags; - int rv = 0; - - rtc_time_to_tm(ts->tv_sec, &new_rtc_tm); - rtc_dev = alarmtimer_get_rtcdev(); - rv = do_settimeofday(ts); - if (rv < 0) - return rv; - if (rtc_dev) - rv = rtc_set_time(rtc_dev, &new_rtc_tm); - - spin_lock_irqsave(&alarm_slock, flags); - alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK; - wake_up(&alarm_wait_queue); - spin_unlock_irqrestore(&alarm_slock, flags); - - return rv; -} - -static int alarm_get_time(enum android_alarm_type alarm_type, - struct timespec *ts) -{ - int rv = 0; - - switch (alarm_type) { - case 
ANDROID_ALARM_RTC_WAKEUP: - case ANDROID_ALARM_RTC: - getnstimeofday(ts); - break; - case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP: - case ANDROID_ALARM_ELAPSED_REALTIME: - get_monotonic_boottime(ts); - break; - case ANDROID_ALARM_SYSTEMTIME: - ktime_get_ts(ts); - break; - default: - rv = -EINVAL; - } - return rv; -} - -static long alarm_do_ioctl(struct file *file, unsigned int cmd, - struct timespec *ts) -{ - int rv = 0; - unsigned long flags; - enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd); - - if (alarm_type >= ANDROID_ALARM_TYPE_COUNT) - return -EINVAL; - - if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) { - if ((file->f_flags & O_ACCMODE) == O_RDONLY) - return -EPERM; - if (file->private_data == NULL && - cmd != ANDROID_ALARM_SET_RTC) { - spin_lock_irqsave(&alarm_slock, flags); - if (alarm_opened) { - spin_unlock_irqrestore(&alarm_slock, flags); - return -EBUSY; - } - alarm_opened = 1; - file->private_data = (void *)1; - spin_unlock_irqrestore(&alarm_slock, flags); - } - } - - switch (ANDROID_ALARM_BASE_CMD(cmd)) { - case ANDROID_ALARM_CLEAR(0): - alarm_clear(alarm_type); - break; - case ANDROID_ALARM_SET(0): - alarm_set(alarm_type, ts); - break; - case ANDROID_ALARM_SET_AND_WAIT(0): - alarm_set(alarm_type, ts); - /* fall though */ - case ANDROID_ALARM_WAIT: - rv = alarm_wait(); - break; - case ANDROID_ALARM_SET_RTC: - rv = alarm_set_rtc(ts); - break; - case ANDROID_ALARM_GET_TIME(0): - rv = alarm_get_time(alarm_type, ts); - break; - - default: - rv = -EINVAL; - } - return rv; -} - -static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - - struct timespec ts; - int rv; - - switch (ANDROID_ALARM_BASE_CMD(cmd)) { - case ANDROID_ALARM_SET_AND_WAIT(0): - case ANDROID_ALARM_SET(0): - case ANDROID_ALARM_SET_RTC: - if (copy_from_user(&ts, (void __user *)arg, sizeof(ts))) - return -EFAULT; - break; - } - - rv = alarm_do_ioctl(file, cmd, &ts); - if (rv) - return rv; - - switch (ANDROID_ALARM_BASE_CMD(cmd)) { - case ANDROID_ALARM_GET_TIME(0): - if (copy_to_user((void __user *)arg, &ts, sizeof(ts))) - return -EFAULT; - break; - } - - return 0; -} -#ifdef CONFIG_COMPAT -static long alarm_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - - struct timespec ts; - int rv; - - switch (ANDROID_ALARM_BASE_CMD(cmd)) { - case ANDROID_ALARM_SET_AND_WAIT_COMPAT(0): - case ANDROID_ALARM_SET_COMPAT(0): - case ANDROID_ALARM_SET_RTC_COMPAT: - if (compat_get_timespec(&ts, (void __user *)arg)) - return -EFAULT; - /* fall through */ - case ANDROID_ALARM_GET_TIME_COMPAT(0): - cmd = ANDROID_ALARM_COMPAT_TO_NORM(cmd); - break; - } - - rv = alarm_do_ioctl(file, cmd, &ts); - if (rv) - return rv; - - switch (ANDROID_ALARM_BASE_CMD(cmd)) { - case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */ - if (compat_put_timespec(&ts, (void __user *)arg)) - return -EFAULT; - break; - } - - return 0; -} -#endif - -static int alarm_open(struct inode *inode, struct file *file) -{ - file->private_data = NULL; - return 0; -} - -static int alarm_release(struct inode *inode, struct file *file) -{ - int i; - unsigned long flags; - - spin_lock_irqsave(&alarm_slock, flags); - if (file->private_data) { - for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) { - uint32_t alarm_type_mask = 1U << i; - if (alarm_enabled & alarm_type_mask) { - alarm_dbg(INFO, - "%s: clear alarm, pending %d\n", - __func__, - !!(alarm_pending & alarm_type_mask)); - alarm_enabled &= ~alarm_type_mask; - } - spin_unlock_irqrestore(&alarm_slock, flags); - 
devalarm_cancel(&alarms[i]); - spin_lock_irqsave(&alarm_slock, flags); - } - if (alarm_pending | wait_pending) { - if (alarm_pending) - alarm_dbg(INFO, "%s: clear pending alarms %x\n", - __func__, alarm_pending); - __pm_relax(&alarm_wake_lock); - wait_pending = 0; - alarm_pending = 0; - } - alarm_opened = 0; - } - spin_unlock_irqrestore(&alarm_slock, flags); - return 0; -} - -static void devalarm_triggered(struct devalarm *alarm) -{ - unsigned long flags; - uint32_t alarm_type_mask = 1U << alarm->type; - - alarm_dbg(INT, "%s: type %d\n", __func__, alarm->type); - spin_lock_irqsave(&alarm_slock, flags); - if (alarm_enabled & alarm_type_mask) { - __pm_wakeup_event(&alarm_wake_lock, 5000); /* 5secs */ - alarm_enabled &= ~alarm_type_mask; - alarm_pending |= alarm_type_mask; - wake_up(&alarm_wait_queue); - } - spin_unlock_irqrestore(&alarm_slock, flags); -} - - -static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt) -{ - struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt); - - devalarm_triggered(devalrm); - return HRTIMER_NORESTART; -} - -static enum alarmtimer_restart devalarm_alarmhandler(struct alarm *alrm, - ktime_t now) -{ - struct devalarm *devalrm = container_of(alrm, struct devalarm, u.alrm); - - devalarm_triggered(devalrm); - return ALARMTIMER_NORESTART; -} - - -static const struct file_operations alarm_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = alarm_ioctl, - .open = alarm_open, - .release = alarm_release, -#ifdef CONFIG_COMPAT - .compat_ioctl = alarm_compat_ioctl, -#endif -}; - -static struct miscdevice alarm_device = { - .minor = MISC_DYNAMIC_MINOR, - .name = "alarm", - .fops = &alarm_fops, -}; - -static int __init alarm_dev_init(void) -{ - int err; - int i; - - err = misc_register(&alarm_device); - if (err) - return err; - - alarm_init(&alarms[ANDROID_ALARM_RTC_WAKEUP].u.alrm, - ALARM_REALTIME, devalarm_alarmhandler); - hrtimer_init(&alarms[ANDROID_ALARM_RTC].u.hrt, - CLOCK_REALTIME, HRTIMER_MODE_ABS); - alarm_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].u.alrm, - ALARM_BOOTTIME, devalarm_alarmhandler); - hrtimer_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME].u.hrt, - CLOCK_BOOTTIME, HRTIMER_MODE_ABS); - hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].u.hrt, - CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - - for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) { - alarms[i].type = i; - if (!is_wakeup(i)) - alarms[i].u.hrt.function = devalarm_hrthandler; - } - - wakeup_source_init(&alarm_wake_lock, "alarm"); - return 0; -} - -static void __exit alarm_dev_exit(void) -{ - misc_deregister(&alarm_device); - wakeup_source_trash(&alarm_wake_lock); -} - -module_init(alarm_dev_init); -module_exit(alarm_dev_exit); - diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h deleted file mode 100644 index 4fd32f337f9c..000000000000 --- a/drivers/staging/android/android_alarm.h +++ /dev/null @@ -1,81 +0,0 @@ -/* include/linux/android_alarm.h - * - * Copyright (C) 2006-2007 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#ifndef _LINUX_ANDROID_ALARM_H -#define _LINUX_ANDROID_ALARM_H - -#include <linux/ioctl.h> -#include <linux/time.h> -#include <linux/compat.h> - -enum android_alarm_type { - /* return code bit numbers or set alarm arg */ - ANDROID_ALARM_RTC_WAKEUP, - ANDROID_ALARM_RTC, - ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, - ANDROID_ALARM_ELAPSED_REALTIME, - ANDROID_ALARM_SYSTEMTIME, - - ANDROID_ALARM_TYPE_COUNT, - - /* return code bit numbers */ - /* ANDROID_ALARM_TIME_CHANGE = 16 */ -}; - -enum android_alarm_return_flags { - ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, - ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, - ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = - 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, - ANDROID_ALARM_ELAPSED_REALTIME_MASK = - 1U << ANDROID_ALARM_ELAPSED_REALTIME, - ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, - ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 -}; - -/* Disable alarm */ -#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4)) - -/* Ack last alarm and wait for next */ -#define ANDROID_ALARM_WAIT _IO('a', 1) - -#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) -/* Set alarm */ -#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) -#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) -#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec) -#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) -#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) -#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) - - -#ifdef CONFIG_COMPAT -#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \ - struct compat_timespec) -#define ANDROID_ALARM_SET_AND_WAIT_COMPAT(type) ALARM_IOW(3, type, \ - struct compat_timespec) -#define ANDROID_ALARM_GET_TIME_COMPAT(type) ALARM_IOW(4, type, \ - struct compat_timespec) -#define ANDROID_ALARM_SET_RTC_COMPAT _IOW('a', 5, \ - struct compat_timespec) -#define ANDROID_ALARM_IOCTL_NR(cmd) (_IOC_NR(cmd) & ((1<<4)-1)) -#define ANDROID_ALARM_COMPAT_TO_NORM(cmd) \ - ALARM_IOW(ANDROID_ALARM_IOCTL_NR(cmd), \ - ANDROID_ALARM_IOCTL_TO_TYPE(cmd), \ - struct timespec) - -#endif - -#endif diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index e681bdd9aa5f..ccaef8b48eba 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -224,21 +224,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, /* If size is not set, or set to 0, always return EOF. */ if (asma->size == 0) - goto out; + goto out_unlock; if (!asma->file) { ret = -EBADF; - goto out; + goto out_unlock; } - ret = asma->file->f_op->read(asma->file, buf, len, pos); - if (ret < 0) - goto out; + mutex_unlock(&ashmem_mutex); - /** Update backing file pos, since f_ops->read() doesn't */ - asma->file->f_pos = *pos; + /* + * asma and asma->file are used outside the lock here. We assume + * once asma->file is set it will never be changed, and will not + * be destroyed until all references to the file are dropped and + * ashmem_release is called. 
+ */ + ret = asma->file->f_op->read(asma->file, buf, len, pos); + if (ret >= 0) { + /** Update backing file pos, since f_ops->read() doesn't */ + asma->file->f_pos = *pos; + } + return ret; -out: +out_unlock: mutex_unlock(&ashmem_mutex); return ret; } @@ -317,22 +325,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } get_file(asma->file); - /* - * XXX - Reworked to use shmem_zero_setup() instead of - * shmem_set_file while we're in staging. -jstultz - */ - if (vma->vm_flags & VM_SHARED) { - ret = shmem_zero_setup(vma); - if (ret) { - fput(asma->file); - goto out; - } + if (vma->vm_flags & VM_SHARED) + shmem_set_file(vma, asma->file); + else { + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = asma->file; } - if (vma->vm_file) - fput(vma->vm_file); - vma->vm_file = asma->file; - out: mutex_unlock(&ashmem_mutex); return ret; @@ -363,7 +363,9 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc) if (!sc->nr_to_scan) return lru_count; - mutex_lock(&ashmem_mutex); + if (!mutex_trylock(&ashmem_mutex)) + return -1; + list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { loff_t start = range->pgstart * PAGE_SIZE; loff_t end = (range->pgend + 1) * PAGE_SIZE; @@ -413,6 +415,7 @@ out: static int set_name(struct ashmem_area *asma, void __user *name) { + int len; int ret = 0; char local_name[ASHMEM_NAME_LEN]; @@ -425,21 +428,19 @@ static int set_name(struct ashmem_area *asma, void __user *name) * variable that does not need protection and later copy the local * variable to the structure member with lock held. */ - if (copy_from_user(local_name, name, ASHMEM_NAME_LEN)) - return -EFAULT; - + len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); + if (len < 0) + return len; + if (len == ASHMEM_NAME_LEN) + local_name[ASHMEM_NAME_LEN - 1] = '\0'; mutex_lock(&ashmem_mutex); /* cannot change an existing mapping's name */ - if (unlikely(asma->file)) { + if (unlikely(asma->file)) ret = -EINVAL; - goto out; - } - memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, - local_name, ASHMEM_NAME_LEN); - asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; -out: - mutex_unlock(&ashmem_mutex); + else + strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); + mutex_unlock(&ashmem_mutex); return ret; } diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h index 8dc0f0d3adf3..5abcfd7aa706 100644 --- a/drivers/staging/android/ashmem.h +++ b/drivers/staging/android/ashmem.h @@ -16,35 +16,7 @@ #include <linux/ioctl.h> #include <linux/compat.h> -#define ASHMEM_NAME_LEN 256 - -#define ASHMEM_NAME_DEF "dev/ashmem" - -/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ -#define ASHMEM_NOT_PURGED 0 -#define ASHMEM_WAS_PURGED 1 - -/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
*/ -#define ASHMEM_IS_UNPINNED 0 -#define ASHMEM_IS_PINNED 1 - -struct ashmem_pin { - __u32 offset; /* offset into region, in bytes, page-aligned */ - __u32 len; /* length forward from offset, in bytes, page-aligned */ -}; - -#define __ASHMEMIOC 0x77 - -#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) -#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) -#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) -#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) -#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) -#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) -#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) -#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) -#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) -#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) +#include "uapi/ashmem.h" /* support of 32bit userspace on 64bit platforms */ #ifdef CONFIG_COMPAT diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c deleted file mode 100644 index 0fce5fc9923b..000000000000 --- a/drivers/staging/android/binder.c +++ /dev/null @@ -1,3562 +0,0 @@ -/* binder.c - * - * Android IPC Subsystem - * - * Copyright (C) 2007-2008 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <asm/cacheflush.h> -#include <linux/fdtable.h> -#include <linux/file.h> -#include <linux/fs.h> -#include <linux/list.h> -#include <linux/miscdevice.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/nsproxy.h> -#include <linux/poll.h> -#include <linux/debugfs.h> -#include <linux/rbtree.h> -#include <linux/sched.h> -#include <linux/seq_file.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> -#include <linux/slab.h> -#include <linux/pid_namespace.h> - -#include "binder.h" -#include "binder_trace.h" - -static DEFINE_MUTEX(binder_main_lock); -static DEFINE_MUTEX(binder_deferred_lock); -static DEFINE_MUTEX(binder_mmap_lock); - -static HLIST_HEAD(binder_procs); -static HLIST_HEAD(binder_deferred_list); -static HLIST_HEAD(binder_dead_nodes); - -static struct dentry *binder_debugfs_dir_entry_root; -static struct dentry *binder_debugfs_dir_entry_proc; -static struct binder_node *binder_context_mgr_node; -static kuid_t binder_context_mgr_uid = INVALID_UID; -static int binder_last_id; -static struct workqueue_struct *binder_deferred_workqueue; - -#define BINDER_DEBUG_ENTRY(name) \ -static int binder_##name##_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, binder_##name##_show, inode->i_private); \ -} \ -\ -static const struct file_operations binder_##name##_fops = { \ - .owner = THIS_MODULE, \ - .open = binder_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} - -static int binder_proc_show(struct seq_file *m, void *unused); -BINDER_DEBUG_ENTRY(proc); - -/* This is only defined in include/asm-arm/sizes.h */ -#ifndef SZ_1K -#define SZ_1K 0x400 -#endif - -#ifndef SZ_4M -#define SZ_4M 0x400000 -#endif - -#define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 
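For context on the binder.c deletion above: the BINDER_DEBUG_ENTRY(name) macro stamps out a single_open()-backed file_operations so each binder debugfs file can reuse one seq_file show routine. A sketch of what one instantiation expands to, using the standard <linux/seq_file.h> and <linux/debugfs.h> APIs ("state" is one of the files binder actually registers; the seq_puts body is illustrative):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int binder_state_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "binder state:\n");	/* real code dumps procs/nodes/refs */
	return 0;
}

/* BINDER_DEBUG_ENTRY(state) expands to roughly: */
static int binder_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, binder_state_show, inode->i_private);
}

static const struct file_operations binder_state_fops = {
	.owner = THIS_MODULE,
	.open = binder_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* and the driver's init code then registers it with:
 * debugfs_create_file("state", S_IRUGO, binder_debugfs_dir_entry_root,
 *		       NULL, &binder_state_fops);
 */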
- -#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) - -enum { - BINDER_DEBUG_USER_ERROR = 1U << 0, - BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, - BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, - BINDER_DEBUG_OPEN_CLOSE = 1U << 3, - BINDER_DEBUG_DEAD_BINDER = 1U << 4, - BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, - BINDER_DEBUG_READ_WRITE = 1U << 6, - BINDER_DEBUG_USER_REFS = 1U << 7, - BINDER_DEBUG_THREADS = 1U << 8, - BINDER_DEBUG_TRANSACTION = 1U << 9, - BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, - BINDER_DEBUG_FREE_BUFFER = 1U << 11, - BINDER_DEBUG_INTERNAL_REFS = 1U << 12, - BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, - BINDER_DEBUG_PRIORITY_CAP = 1U << 14, - BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, -}; -static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | - BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; -module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); - -static bool binder_debug_no_lock; -module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); - -static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); -static int binder_stop_on_user_error; - -static int binder_set_stop_on_user_error(const char *val, - struct kernel_param *kp) -{ - int ret; - ret = param_set_int(val, kp); - if (binder_stop_on_user_error < 2) - wake_up(&binder_user_error_wait); - return ret; -} -module_param_call(stop_on_user_error, binder_set_stop_on_user_error, - param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); - -#define binder_debug(mask, x...) \ - do { \ - if (binder_debug_mask & mask) \ - pr_info(x); \ - } while (0) - -#define binder_user_error(x...) \ - do { \ - if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ - pr_info(x); \ - if (binder_stop_on_user_error) \ - binder_stop_on_user_error = 2; \ - } while (0) - -enum binder_stat_types { - BINDER_STAT_PROC, - BINDER_STAT_THREAD, - BINDER_STAT_NODE, - BINDER_STAT_REF, - BINDER_STAT_DEATH, - BINDER_STAT_TRANSACTION, - BINDER_STAT_TRANSACTION_COMPLETE, - BINDER_STAT_COUNT -}; - -struct binder_stats { - int br[_IOC_NR(BR_FAILED_REPLY) + 1]; - int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; - int obj_created[BINDER_STAT_COUNT]; - int obj_deleted[BINDER_STAT_COUNT]; -}; - -static struct binder_stats binder_stats; - -static inline void binder_stats_deleted(enum binder_stat_types type) -{ - binder_stats.obj_deleted[type]++; -} - -static inline void binder_stats_created(enum binder_stat_types type) -{ - binder_stats.obj_created[type]++; -} - -struct binder_transaction_log_entry { - int debug_id; - int call_type; - int from_proc; - int from_thread; - int target_handle; - int to_proc; - int to_thread; - int to_node; - int data_size; - int offsets_size; -}; -struct binder_transaction_log { - int next; - int full; - struct binder_transaction_log_entry entry[32]; -}; -static struct binder_transaction_log binder_transaction_log; -static struct binder_transaction_log binder_transaction_log_failed; - -static struct binder_transaction_log_entry *binder_transaction_log_add( - struct binder_transaction_log *log) -{ - struct binder_transaction_log_entry *e; - e = &log->entry[log->next]; - memset(e, 0, sizeof(*e)); - log->next++; - if (log->next == ARRAY_SIZE(log->entry)) { - log->next = 0; - log->full = 1; - } - return e; -} - -struct binder_work { - struct list_head entry; - enum { - BINDER_WORK_TRANSACTION = 1, - BINDER_WORK_TRANSACTION_COMPLETE, - BINDER_WORK_NODE, - BINDER_WORK_DEAD_BINDER, - BINDER_WORK_DEAD_BINDER_AND_CLEAR, - BINDER_WORK_CLEAR_DEATH_NOTIFICATION, - } type; -}; - -struct binder_node 
{ - int debug_id; - struct binder_work work; - union { - struct rb_node rb_node; - struct hlist_node dead_node; - }; - struct binder_proc *proc; - struct hlist_head refs; - int internal_strong_refs; - int local_weak_refs; - int local_strong_refs; - void __user *ptr; - void __user *cookie; - unsigned has_strong_ref:1; - unsigned pending_strong_ref:1; - unsigned has_weak_ref:1; - unsigned pending_weak_ref:1; - unsigned has_async_transaction:1; - unsigned accept_fds:1; - unsigned min_priority:8; - struct list_head async_todo; -}; - -struct binder_ref_death { - struct binder_work work; - void __user *cookie; -}; - -struct binder_ref { - /* Lookups needed: */ - /* node + proc => ref (transaction) */ - /* desc + proc => ref (transaction, inc/dec ref) */ - /* node => refs + procs (proc exit) */ - int debug_id; - struct rb_node rb_node_desc; - struct rb_node rb_node_node; - struct hlist_node node_entry; - struct binder_proc *proc; - struct binder_node *node; - uint32_t desc; - int strong; - int weak; - struct binder_ref_death *death; -}; - -struct binder_buffer { - struct list_head entry; /* free and allocated entries by address */ - struct rb_node rb_node; /* free entry by size or allocated entry */ - /* by address */ - unsigned free:1; - unsigned allow_user_free:1; - unsigned async_transaction:1; - unsigned debug_id:29; - - struct binder_transaction *transaction; - - struct binder_node *target_node; - size_t data_size; - size_t offsets_size; - uint8_t data[0]; -}; - -enum binder_deferred_state { - BINDER_DEFERRED_PUT_FILES = 0x01, - BINDER_DEFERRED_FLUSH = 0x02, - BINDER_DEFERRED_RELEASE = 0x04, -}; - -struct binder_proc { - struct hlist_node proc_node; - struct rb_root threads; - struct rb_root nodes; - struct rb_root refs_by_desc; - struct rb_root refs_by_node; - int pid; - struct vm_area_struct *vma; - struct mm_struct *vma_vm_mm; - struct task_struct *tsk; - struct files_struct *files; - struct hlist_node deferred_work_node; - int deferred_work; - void *buffer; - ptrdiff_t user_buffer_offset; - - struct list_head buffers; - struct rb_root free_buffers; - struct rb_root allocated_buffers; - size_t free_async_space; - - struct page **pages; - size_t buffer_size; - uint32_t buffer_free; - struct list_head todo; - wait_queue_head_t wait; - struct binder_stats stats; - struct list_head delivered_death; - int max_threads; - int requested_threads; - int requested_threads_started; - int ready_threads; - long default_priority; - struct dentry *debugfs_entry; -}; - -enum { - BINDER_LOOPER_STATE_REGISTERED = 0x01, - BINDER_LOOPER_STATE_ENTERED = 0x02, - BINDER_LOOPER_STATE_EXITED = 0x04, - BINDER_LOOPER_STATE_INVALID = 0x08, - BINDER_LOOPER_STATE_WAITING = 0x10, - BINDER_LOOPER_STATE_NEED_RETURN = 0x20 -}; - -struct binder_thread { - struct binder_proc *proc; - struct rb_node rb_node; - int pid; - int looper; - struct binder_transaction *transaction_stack; - struct list_head todo; - uint32_t return_error; /* Write failed, return error code in read buf */ - uint32_t return_error2; /* Write failed, return error code in read */ - /* buffer. 
Used when sending a reply to a dead process that */ - /* we are also waiting on */ - wait_queue_head_t wait; - struct binder_stats stats; -}; - -struct binder_transaction { - int debug_id; - struct binder_work work; - struct binder_thread *from; - struct binder_transaction *from_parent; - struct binder_proc *to_proc; - struct binder_thread *to_thread; - struct binder_transaction *to_parent; - unsigned need_reply:1; - /* unsigned is_dead:1; */ /* not used at the moment */ - - struct binder_buffer *buffer; - unsigned int code; - unsigned int flags; - long priority; - long saved_priority; - kuid_t sender_euid; -}; - -static void -binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); - -static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) -{ - struct files_struct *files = proc->files; - unsigned long rlim_cur; - unsigned long irqs; - - if (files == NULL) - return -ESRCH; - - if (!lock_task_sighand(proc->tsk, &irqs)) - return -EMFILE; - - rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); - unlock_task_sighand(proc->tsk, &irqs); - - return __alloc_fd(files, 0, rlim_cur, flags); -} - -/* - * copied from fd_install - */ -static void task_fd_install( - struct binder_proc *proc, unsigned int fd, struct file *file) -{ - if (proc->files) - __fd_install(proc->files, fd, file); -} - -/* - * copied from sys_close - */ -static long task_close_fd(struct binder_proc *proc, unsigned int fd) -{ - int retval; - - if (proc->files == NULL) - return -ESRCH; - - retval = __close_fd(proc->files, fd); - /* can't restart close syscall because file table entry was cleared */ - if (unlikely(retval == -ERESTARTSYS || - retval == -ERESTARTNOINTR || - retval == -ERESTARTNOHAND || - retval == -ERESTART_RESTARTBLOCK)) - retval = -EINTR; - - return retval; -} - -static inline void binder_lock(const char *tag) -{ - trace_binder_lock(tag); - mutex_lock(&binder_main_lock); - trace_binder_locked(tag); -} - -static inline void binder_unlock(const char *tag) -{ - trace_binder_unlock(tag); - mutex_unlock(&binder_main_lock); -} - -static void binder_set_nice(long nice) -{ - long min_nice; - if (can_nice(current, nice)) { - set_user_nice(current, nice); - return; - } - min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; - binder_debug(BINDER_DEBUG_PRIORITY_CAP, - "%d: nice value %ld not allowed use %ld instead\n", - current->pid, nice, min_nice); - set_user_nice(current, min_nice); - if (min_nice < 20) - return; - binder_user_error("%d RLIMIT_NICE not set\n", current->pid); -} - -static size_t binder_buffer_size(struct binder_proc *proc, - struct binder_buffer *buffer) -{ - if (list_is_last(&buffer->entry, &proc->buffers)) - return proc->buffer + proc->buffer_size - (void *)buffer->data; - else - return (size_t)list_entry(buffer->entry.next, - struct binder_buffer, entry) - (size_t)buffer->data; -} - -static void binder_insert_free_buffer(struct binder_proc *proc, - struct binder_buffer *new_buffer) -{ - struct rb_node **p = &proc->free_buffers.rb_node; - struct rb_node *parent = NULL; - struct binder_buffer *buffer; - size_t buffer_size; - size_t new_buffer_size; - - BUG_ON(!new_buffer->free); - - new_buffer_size = binder_buffer_size(proc, new_buffer); - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: add free buffer, size %zd, at %p\n", - proc->pid, new_buffer_size, new_buffer); - - while (*p) { - parent = *p; - buffer = rb_entry(parent, struct binder_buffer, rb_node); - BUG_ON(!buffer->free); - - buffer_size = binder_buffer_size(proc, buffer); - - if (new_buffer_size < 
buffer_size) - p = &parent->rb_left; - else - p = &parent->rb_right; - } - rb_link_node(&new_buffer->rb_node, parent, p); - rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); -} - -static void binder_insert_allocated_buffer(struct binder_proc *proc, - struct binder_buffer *new_buffer) -{ - struct rb_node **p = &proc->allocated_buffers.rb_node; - struct rb_node *parent = NULL; - struct binder_buffer *buffer; - - BUG_ON(new_buffer->free); - - while (*p) { - parent = *p; - buffer = rb_entry(parent, struct binder_buffer, rb_node); - BUG_ON(buffer->free); - - if (new_buffer < buffer) - p = &parent->rb_left; - else if (new_buffer > buffer) - p = &parent->rb_right; - else - BUG(); - } - rb_link_node(&new_buffer->rb_node, parent, p); - rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); -} - -static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, - void __user *user_ptr) -{ - struct rb_node *n = proc->allocated_buffers.rb_node; - struct binder_buffer *buffer; - struct binder_buffer *kern_ptr; - - kern_ptr = user_ptr - proc->user_buffer_offset - - offsetof(struct binder_buffer, data); - - while (n) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - BUG_ON(buffer->free); - - if (kern_ptr < buffer) - n = n->rb_left; - else if (kern_ptr > buffer) - n = n->rb_right; - else - return buffer; - } - return NULL; -} - -static int binder_update_page_range(struct binder_proc *proc, int allocate, - void *start, void *end, - struct vm_area_struct *vma) -{ - void *page_addr; - unsigned long user_page_addr; - struct vm_struct tmp_area; - struct page **page; - struct mm_struct *mm; - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: %s pages %p-%p\n", proc->pid, - allocate ? "allocate" : "free", start, end); - - if (end <= start) - return 0; - - trace_binder_update_page_range(proc, allocate, start, end); - - if (vma) - mm = NULL; - else - mm = get_task_mm(proc->tsk); - - if (mm) { - down_write(&mm->mmap_sem); - vma = proc->vma; - if (vma && mm != proc->vma_vm_mm) { - pr_err("%d: vma mm and task mm mismatch\n", - proc->pid); - vma = NULL; - } - } - - if (allocate == 0) - goto free_range; - - if (vma == NULL) { - pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - proc->pid); - goto err_no_vma; - } - - for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { - int ret; - struct page **page_array_ptr; - page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; - - BUG_ON(*page); - *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); - if (*page == NULL) { - pr_err("%d: binder_alloc_buf failed for page at %p\n", - proc->pid, page_addr); - goto err_alloc_page_failed; - } - tmp_area.addr = page_addr; - tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? 
*/; - page_array_ptr = page; - ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); - if (ret) { - pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", - proc->pid, page_addr); - goto err_map_kernel_failed; - } - user_page_addr = - (uintptr_t)page_addr + proc->user_buffer_offset; - ret = vm_insert_page(vma, user_page_addr, page[0]); - if (ret) { - pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", - proc->pid, user_page_addr); - goto err_vm_insert_page_failed; - } - /* vm_insert_page does not seem to increment the refcount */ - } - if (mm) { - up_write(&mm->mmap_sem); - mmput(mm); - } - return 0; - -free_range: - for (page_addr = end - PAGE_SIZE; page_addr >= start; - page_addr -= PAGE_SIZE) { - page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; - if (vma) - zap_page_range(vma, (uintptr_t)page_addr + - proc->user_buffer_offset, PAGE_SIZE, NULL); -err_vm_insert_page_failed: - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); -err_map_kernel_failed: - __free_page(*page); - *page = NULL; -err_alloc_page_failed: - ; - } -err_no_vma: - if (mm) { - up_write(&mm->mmap_sem); - mmput(mm); - } - return -ENOMEM; -} - -static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, - size_t data_size, - size_t offsets_size, int is_async) -{ - struct rb_node *n = proc->free_buffers.rb_node; - struct binder_buffer *buffer; - size_t buffer_size; - struct rb_node *best_fit = NULL; - void *has_page_addr; - void *end_page_addr; - size_t size; - - if (proc->vma == NULL) { - pr_err("%d: binder_alloc_buf, no vma\n", - proc->pid); - return NULL; - } - - size = ALIGN(data_size, sizeof(void *)) + - ALIGN(offsets_size, sizeof(void *)); - - if (size < data_size || size < offsets_size) { - binder_user_error("%d: got transaction with invalid size %zd-%zd\n", - proc->pid, data_size, offsets_size); - return NULL; - } - - if (is_async && - proc->free_async_space < size + sizeof(struct binder_buffer)) { - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd failed, no async space left\n", - proc->pid, size); - return NULL; - } - - while (n) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - BUG_ON(!buffer->free); - buffer_size = binder_buffer_size(proc, buffer); - - if (size < buffer_size) { - best_fit = n; - n = n->rb_left; - } else if (size > buffer_size) - n = n->rb_right; - else { - best_fit = n; - break; - } - } - if (best_fit == NULL) { - pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", - proc->pid, size); - return NULL; - } - if (n == NULL) { - buffer = rb_entry(best_fit, struct binder_buffer, rb_node); - buffer_size = binder_buffer_size(proc, buffer); - } - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", - proc->pid, size, buffer, buffer_size); - - has_page_addr = - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); - if (n == NULL) { - if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) - buffer_size = size; /* no room for other buffers */ - else - buffer_size = size + sizeof(struct binder_buffer); - } - end_page_addr = - (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); - if (end_page_addr > has_page_addr) - end_page_addr = has_page_addr; - if (binder_update_page_range(proc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) - return NULL; - - rb_erase(best_fit, &proc->free_buffers); - buffer->free = 0; - binder_insert_allocated_buffer(proc, buffer); - if (buffer_size != size) { - struct 
binder_buffer *new_buffer = (void *)buffer->data + size; - list_add(&new_buffer->entry, &buffer->entry); - new_buffer->free = 1; - binder_insert_free_buffer(proc, new_buffer); - } - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got %p\n", - proc->pid, size, buffer); - buffer->data_size = data_size; - buffer->offsets_size = offsets_size; - buffer->async_transaction = is_async; - if (is_async) { - proc->free_async_space -= size + sizeof(struct binder_buffer); - binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, - "%d: binder_alloc_buf size %zd async free %zd\n", - proc->pid, size, proc->free_async_space); - } - - return buffer; -} - -static void *buffer_start_page(struct binder_buffer *buffer) -{ - return (void *)((uintptr_t)buffer & PAGE_MASK); -} - -static void *buffer_end_page(struct binder_buffer *buffer) -{ - return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); -} - -static void binder_delete_free_buffer(struct binder_proc *proc, - struct binder_buffer *buffer) -{ - struct binder_buffer *prev, *next = NULL; - int free_page_end = 1; - int free_page_start = 1; - - BUG_ON(proc->buffers.next == &buffer->entry); - prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); - BUG_ON(!prev->free); - if (buffer_end_page(prev) == buffer_start_page(buffer)) { - free_page_start = 0; - if (buffer_end_page(prev) == buffer_end_page(buffer)) - free_page_end = 0; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p share page with %p\n", - proc->pid, buffer, prev); - } - - if (!list_is_last(&buffer->entry, &proc->buffers)) { - next = list_entry(buffer->entry.next, - struct binder_buffer, entry); - if (buffer_start_page(next) == buffer_end_page(buffer)) { - free_page_end = 0; - if (buffer_start_page(next) == - buffer_start_page(buffer)) - free_page_start = 0; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p share page with %p\n", - proc->pid, buffer, prev); - } - } - list_del(&buffer->entry); - if (free_page_start || free_page_end) { - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p do not share page%s%s with with %p or %p\n", - proc->pid, buffer, free_page_start ? "" : " end", - free_page_end ? "" : " start", prev, next); - binder_update_page_range(proc, 0, free_page_start ? - buffer_start_page(buffer) : buffer_end_page(buffer), - (free_page_end ? 
buffer_end_page(buffer) : - buffer_start_page(buffer)) + PAGE_SIZE, NULL); - } -} - -static void binder_free_buf(struct binder_proc *proc, - struct binder_buffer *buffer) -{ - size_t size, buffer_size; - - buffer_size = binder_buffer_size(proc, buffer); - - size = ALIGN(buffer->data_size, sizeof(void *)) + - ALIGN(buffer->offsets_size, sizeof(void *)); - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_free_buf %p size %zd buffer_size %zd\n", - proc->pid, buffer, size, buffer_size); - - BUG_ON(buffer->free); - BUG_ON(size > buffer_size); - BUG_ON(buffer->transaction != NULL); - BUG_ON((void *)buffer < proc->buffer); - BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); - - if (buffer->async_transaction) { - proc->free_async_space += size + sizeof(struct binder_buffer); - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, - "%d: binder_free_buf size %zd async free %zd\n", - proc->pid, size, proc->free_async_space); - } - - binder_update_page_range(proc, 0, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), - NULL); - rb_erase(&buffer->rb_node, &proc->allocated_buffers); - buffer->free = 1; - if (!list_is_last(&buffer->entry, &proc->buffers)) { - struct binder_buffer *next = list_entry(buffer->entry.next, - struct binder_buffer, entry); - if (next->free) { - rb_erase(&next->rb_node, &proc->free_buffers); - binder_delete_free_buffer(proc, next); - } - } - if (proc->buffers.next != &buffer->entry) { - struct binder_buffer *prev = list_entry(buffer->entry.prev, - struct binder_buffer, entry); - if (prev->free) { - binder_delete_free_buffer(proc, buffer); - rb_erase(&prev->rb_node, &proc->free_buffers); - buffer = prev; - } - } - binder_insert_free_buffer(proc, buffer); -} - -static struct binder_node *binder_get_node(struct binder_proc *proc, - void __user *ptr) -{ - struct rb_node *n = proc->nodes.rb_node; - struct binder_node *node; - - while (n) { - node = rb_entry(n, struct binder_node, rb_node); - - if (ptr < node->ptr) - n = n->rb_left; - else if (ptr > node->ptr) - n = n->rb_right; - else - return node; - } - return NULL; -} - -static struct binder_node *binder_new_node(struct binder_proc *proc, - void __user *ptr, - void __user *cookie) -{ - struct rb_node **p = &proc->nodes.rb_node; - struct rb_node *parent = NULL; - struct binder_node *node; - - while (*p) { - parent = *p; - node = rb_entry(parent, struct binder_node, rb_node); - - if (ptr < node->ptr) - p = &(*p)->rb_left; - else if (ptr > node->ptr) - p = &(*p)->rb_right; - else - return NULL; - } - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (node == NULL) - return NULL; - binder_stats_created(BINDER_STAT_NODE); - rb_link_node(&node->rb_node, parent, p); - rb_insert_color(&node->rb_node, &proc->nodes); - node->debug_id = ++binder_last_id; - node->proc = proc; - node->ptr = ptr; - node->cookie = cookie; - node->work.type = BINDER_WORK_NODE; - INIT_LIST_HEAD(&node->work.entry); - INIT_LIST_HEAD(&node->async_todo); - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d:%d node %d u%p c%p created\n", - proc->pid, current->pid, node->debug_id, - node->ptr, node->cookie); - return node; -} - -static int binder_inc_node(struct binder_node *node, int strong, int internal, - struct list_head *target_list) -{ - if (strong) { - if (internal) { - if (target_list == NULL && - node->internal_strong_refs == 0 && - !(node == binder_context_mgr_node && - node->has_strong_ref)) { - pr_err("invalid inc strong node for %d\n", - node->debug_id); - return -EINVAL; - } - 
node->internal_strong_refs++; - } else - node->local_strong_refs++; - if (!node->has_strong_ref && target_list) { - list_del_init(&node->work.entry); - list_add_tail(&node->work.entry, target_list); - } - } else { - if (!internal) - node->local_weak_refs++; - if (!node->has_weak_ref && list_empty(&node->work.entry)) { - if (target_list == NULL) { - pr_err("invalid inc weak node for %d\n", - node->debug_id); - return -EINVAL; - } - list_add_tail(&node->work.entry, target_list); - } - } - return 0; -} - -static int binder_dec_node(struct binder_node *node, int strong, int internal) -{ - if (strong) { - if (internal) - node->internal_strong_refs--; - else - node->local_strong_refs--; - if (node->local_strong_refs || node->internal_strong_refs) - return 0; - } else { - if (!internal) - node->local_weak_refs--; - if (node->local_weak_refs || !hlist_empty(&node->refs)) - return 0; - } - if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { - if (list_empty(&node->work.entry)) { - list_add_tail(&node->work.entry, &node->proc->todo); - wake_up_interruptible(&node->proc->wait); - } - } else { - if (hlist_empty(&node->refs) && !node->local_strong_refs && - !node->local_weak_refs) { - list_del_init(&node->work.entry); - if (node->proc) { - rb_erase(&node->rb_node, &node->proc->nodes); - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "refless node %d deleted\n", - node->debug_id); - } else { - hlist_del(&node->dead_node); - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "dead node %d deleted\n", - node->debug_id); - } - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); - } - } - - return 0; -} - - -static struct binder_ref *binder_get_ref(struct binder_proc *proc, - uint32_t desc) -{ - struct rb_node *n = proc->refs_by_desc.rb_node; - struct binder_ref *ref; - - while (n) { - ref = rb_entry(n, struct binder_ref, rb_node_desc); - - if (desc < ref->desc) - n = n->rb_left; - else if (desc > ref->desc) - n = n->rb_right; - else - return ref; - } - return NULL; -} - -static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, - struct binder_node *node) -{ - struct rb_node *n; - struct rb_node **p = &proc->refs_by_node.rb_node; - struct rb_node *parent = NULL; - struct binder_ref *ref, *new_ref; - - while (*p) { - parent = *p; - ref = rb_entry(parent, struct binder_ref, rb_node_node); - - if (node < ref->node) - p = &(*p)->rb_left; - else if (node > ref->node) - p = &(*p)->rb_right; - else - return ref; - } - new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); - if (new_ref == NULL) - return NULL; - binder_stats_created(BINDER_STAT_REF); - new_ref->debug_id = ++binder_last_id; - new_ref->proc = proc; - new_ref->node = node; - rb_link_node(&new_ref->rb_node_node, parent, p); - rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); - - new_ref->desc = (node == binder_context_mgr_node) ? 
0 : 1; - for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { - ref = rb_entry(n, struct binder_ref, rb_node_desc); - if (ref->desc > new_ref->desc) - break; - new_ref->desc = ref->desc + 1; - } - - p = &proc->refs_by_desc.rb_node; - while (*p) { - parent = *p; - ref = rb_entry(parent, struct binder_ref, rb_node_desc); - - if (new_ref->desc < ref->desc) - p = &(*p)->rb_left; - else if (new_ref->desc > ref->desc) - p = &(*p)->rb_right; - else - BUG(); - } - rb_link_node(&new_ref->rb_node_desc, parent, p); - rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); - if (node) { - hlist_add_head(&new_ref->node_entry, &node->refs); - - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d new ref %d desc %d for node %d\n", - proc->pid, new_ref->debug_id, new_ref->desc, - node->debug_id); - } else { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d new ref %d desc %d for dead node\n", - proc->pid, new_ref->debug_id, new_ref->desc); - } - return new_ref; -} - -static void binder_delete_ref(struct binder_ref *ref) -{ - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d delete ref %d desc %d for node %d\n", - ref->proc->pid, ref->debug_id, ref->desc, - ref->node->debug_id); - - rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); - rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); - if (ref->strong) - binder_dec_node(ref->node, 1, 1); - hlist_del(&ref->node_entry); - binder_dec_node(ref->node, 0, 1); - if (ref->death) { - binder_debug(BINDER_DEBUG_DEAD_BINDER, - "%d delete ref %d desc %d has death notification\n", - ref->proc->pid, ref->debug_id, ref->desc); - list_del(&ref->death->work.entry); - kfree(ref->death); - binder_stats_deleted(BINDER_STAT_DEATH); - } - kfree(ref); - binder_stats_deleted(BINDER_STAT_REF); -} - -static int binder_inc_ref(struct binder_ref *ref, int strong, - struct list_head *target_list) -{ - int ret; - if (strong) { - if (ref->strong == 0) { - ret = binder_inc_node(ref->node, 1, 1, target_list); - if (ret) - return ret; - } - ref->strong++; - } else { - if (ref->weak == 0) { - ret = binder_inc_node(ref->node, 0, 1, target_list); - if (ret) - return ret; - } - ref->weak++; - } - return 0; -} - - -static int binder_dec_ref(struct binder_ref *ref, int strong) -{ - if (strong) { - if (ref->strong == 0) { - binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", - ref->proc->pid, ref->debug_id, - ref->desc, ref->strong, ref->weak); - return -EINVAL; - } - ref->strong--; - if (ref->strong == 0) { - int ret; - ret = binder_dec_node(ref->node, strong, 1); - if (ret) - return ret; - } - } else { - if (ref->weak == 0) { - binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", - ref->proc->pid, ref->debug_id, - ref->desc, ref->strong, ref->weak); - return -EINVAL; - } - ref->weak--; - } - if (ref->strong == 0 && ref->weak == 0) - binder_delete_ref(ref); - return 0; -} - -static void binder_pop_transaction(struct binder_thread *target_thread, - struct binder_transaction *t) -{ - if (target_thread) { - BUG_ON(target_thread->transaction_stack != t); - BUG_ON(target_thread->transaction_stack->from != target_thread); - target_thread->transaction_stack = - target_thread->transaction_stack->from_parent; - t->from = NULL; - } - t->need_reply = 0; - if (t->buffer) - t->buffer->transaction = NULL; - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); -} - -static void binder_send_failed_reply(struct binder_transaction *t, - uint32_t error_code) -{ - struct binder_thread *target_thread; - BUG_ON(t->flags & TF_ONE_WAY); - while (1) { - 
target_thread = t->from; - if (target_thread) { - if (target_thread->return_error != BR_OK && - target_thread->return_error2 == BR_OK) { - target_thread->return_error2 = - target_thread->return_error; - target_thread->return_error = BR_OK; - } - if (target_thread->return_error == BR_OK) { - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "send failed reply for transaction %d to %d:%d\n", - t->debug_id, target_thread->proc->pid, - target_thread->pid); - - binder_pop_transaction(target_thread, t); - target_thread->return_error = error_code; - wake_up_interruptible(&target_thread->wait); - } else { - pr_err("reply failed, target thread, %d:%d, has error code %d already\n", - target_thread->proc->pid, - target_thread->pid, - target_thread->return_error); - } - return; - } else { - struct binder_transaction *next = t->from_parent; - - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "send failed reply for transaction %d, target dead\n", - t->debug_id); - - binder_pop_transaction(target_thread, t); - if (next == NULL) { - binder_debug(BINDER_DEBUG_DEAD_BINDER, - "reply failed, no target thread at root\n"); - return; - } - t = next; - binder_debug(BINDER_DEBUG_DEAD_BINDER, - "reply failed, no target thread -- retry %d\n", - t->debug_id); - } - } -} - -static void binder_transaction_buffer_release(struct binder_proc *proc, - struct binder_buffer *buffer, - size_t *failed_at) -{ - size_t *offp, *off_end; - int debug_id = buffer->debug_id; - - binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %p\n", - proc->pid, buffer->debug_id, - buffer->data_size, buffer->offsets_size, failed_at); - - if (buffer->target_node) - binder_dec_node(buffer->target_node, 1, 0); - - offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); - if (failed_at) - off_end = failed_at; - else - off_end = (void *)offp + buffer->offsets_size; - for (; offp < off_end; offp++) { - struct flat_binder_object *fp; - if (*offp > buffer->data_size - sizeof(*fp) || - buffer->data_size < sizeof(*fp) || - !IS_ALIGNED(*offp, sizeof(void *))) { - pr_err("transaction release %d bad offset %zd, size %zd\n", - debug_id, *offp, buffer->data_size); - continue; - } - fp = (struct flat_binder_object *)(buffer->data + *offp); - switch (fp->type) { - case BINDER_TYPE_BINDER: - case BINDER_TYPE_WEAK_BINDER: { - struct binder_node *node = binder_get_node(proc, fp->binder); - if (node == NULL) { - pr_err("transaction release %d bad node %p\n", - debug_id, fp->binder); - break; - } - binder_debug(BINDER_DEBUG_TRANSACTION, - " node %d u%p\n", - node->debug_id, node->ptr); - binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); - } break; - case BINDER_TYPE_HANDLE: - case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, fp->handle); - if (ref == NULL) { - pr_err("transaction release %d bad handle %ld\n", - debug_id, fp->handle); - break; - } - binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, ref->node->debug_id); - binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); - } break; - - case BINDER_TYPE_FD: - binder_debug(BINDER_DEBUG_TRANSACTION, - " fd %ld\n", fp->handle); - if (failed_at) - task_close_fd(proc, fp->handle); - break; - - default: - pr_err("transaction release %d bad object type %lx\n", - debug_id, fp->type); - break; - } - } -} - -static void binder_transaction(struct binder_proc *proc, - struct binder_thread *thread, - struct binder_transaction_data *tr, int reply) -{ - struct binder_transaction *t; - struct 
binder_work *tcomplete; - size_t *offp, *off_end; - struct binder_proc *target_proc; - struct binder_thread *target_thread = NULL; - struct binder_node *target_node = NULL; - struct list_head *target_list; - wait_queue_head_t *target_wait; - struct binder_transaction *in_reply_to = NULL; - struct binder_transaction_log_entry *e; - uint32_t return_error; - - e = binder_transaction_log_add(&binder_transaction_log); - e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); - e->from_proc = proc->pid; - e->from_thread = thread->pid; - e->target_handle = tr->target.handle; - e->data_size = tr->data_size; - e->offsets_size = tr->offsets_size; - - if (reply) { - in_reply_to = thread->transaction_stack; - if (in_reply_to == NULL) { - binder_user_error("%d:%d got reply transaction with no transaction stack\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - goto err_empty_call_stack; - } - binder_set_nice(in_reply_to->saved_priority); - if (in_reply_to->to_thread != thread) { - binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", - proc->pid, thread->pid, in_reply_to->debug_id, - in_reply_to->to_proc ? - in_reply_to->to_proc->pid : 0, - in_reply_to->to_thread ? - in_reply_to->to_thread->pid : 0); - return_error = BR_FAILED_REPLY; - in_reply_to = NULL; - goto err_bad_call_stack; - } - thread->transaction_stack = in_reply_to->to_parent; - target_thread = in_reply_to->from; - if (target_thread == NULL) { - return_error = BR_DEAD_REPLY; - goto err_dead_binder; - } - if (target_thread->transaction_stack != in_reply_to) { - binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", - proc->pid, thread->pid, - target_thread->transaction_stack ? - target_thread->transaction_stack->debug_id : 0, - in_reply_to->debug_id); - return_error = BR_FAILED_REPLY; - in_reply_to = NULL; - target_thread = NULL; - goto err_dead_binder; - } - target_proc = target_thread->proc; - } else { - if (tr->target.handle) { - struct binder_ref *ref; - ref = binder_get_ref(proc, tr->target.handle); - if (ref == NULL) { - binder_user_error("%d:%d got transaction to invalid handle\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - goto err_invalid_target_handle; - } - target_node = ref->node; - } else { - target_node = binder_context_mgr_node; - if (target_node == NULL) { - return_error = BR_DEAD_REPLY; - goto err_no_context_mgr_node; - } - } - e->to_node = target_node->debug_id; - target_proc = target_node->proc; - if (target_proc == NULL) { - return_error = BR_DEAD_REPLY; - goto err_dead_binder; - } - if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { - struct binder_transaction *tmp; - tmp = thread->transaction_stack; - if (tmp->to_thread != thread) { - binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", - proc->pid, thread->pid, tmp->debug_id, - tmp->to_proc ? tmp->to_proc->pid : 0, - tmp->to_thread ? 
- tmp->to_thread->pid : 0); - return_error = BR_FAILED_REPLY; - goto err_bad_call_stack; - } - while (tmp) { - if (tmp->from && tmp->from->proc == target_proc) - target_thread = tmp->from; - tmp = tmp->from_parent; - } - } - } - if (target_thread) { - e->to_thread = target_thread->pid; - target_list = &target_thread->todo; - target_wait = &target_thread->wait; - } else { - target_list = &target_proc->todo; - target_wait = &target_proc->wait; - } - e->to_proc = target_proc->pid; - - /* TODO: reuse incoming transaction for reply */ - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (t == NULL) { - return_error = BR_FAILED_REPLY; - goto err_alloc_t_failed; - } - binder_stats_created(BINDER_STAT_TRANSACTION); - - tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); - if (tcomplete == NULL) { - return_error = BR_FAILED_REPLY; - goto err_alloc_tcomplete_failed; - } - binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); - - t->debug_id = ++binder_last_id; - e->debug_id = t->debug_id; - - if (reply) - binder_debug(BINDER_DEBUG_TRANSACTION, - "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n", - proc->pid, thread->pid, t->debug_id, - target_proc->pid, target_thread->pid, - tr->data.ptr.buffer, tr->data.ptr.offsets, - tr->data_size, tr->offsets_size); - else - binder_debug(BINDER_DEBUG_TRANSACTION, - "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n", - proc->pid, thread->pid, t->debug_id, - target_proc->pid, target_node->debug_id, - tr->data.ptr.buffer, tr->data.ptr.offsets, - tr->data_size, tr->offsets_size); - - if (!reply && !(tr->flags & TF_ONE_WAY)) - t->from = thread; - else - t->from = NULL; - t->sender_euid = proc->tsk->cred->euid; - t->to_proc = target_proc; - t->to_thread = target_thread; - t->code = tr->code; - t->flags = tr->flags; - t->priority = task_nice(current); - - trace_binder_transaction(reply, t, target_node); - - t->buffer = binder_alloc_buf(target_proc, tr->data_size, - tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); - if (t->buffer == NULL) { - return_error = BR_FAILED_REPLY; - goto err_binder_alloc_buf_failed; - } - t->buffer->allow_user_free = 0; - t->buffer->debug_id = t->debug_id; - t->buffer->transaction = t; - t->buffer->target_node = target_node; - trace_binder_transaction_alloc_buf(t->buffer); - if (target_node) - binder_inc_node(target_node, 1, 0, NULL); - - offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); - - if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { - binder_user_error("%d:%d got transaction with invalid data ptr\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - goto err_copy_data_failed; - } - if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { - binder_user_error("%d:%d got transaction with invalid offsets ptr\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - goto err_copy_data_failed; - } - if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { - binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n", - proc->pid, thread->pid, tr->offsets_size); - return_error = BR_FAILED_REPLY; - goto err_bad_offset; - } - off_end = (void *)offp + tr->offsets_size; - for (; offp < off_end; offp++) { - struct flat_binder_object *fp; - if (*offp > t->buffer->data_size - sizeof(*fp) || - t->buffer->data_size < sizeof(*fp) || - !IS_ALIGNED(*offp, sizeof(void *))) { - binder_user_error("%d:%d got transaction with invalid offset, %zd\n", - proc->pid, thread->pid, *offp); - return_error = BR_FAILED_REPLY; - goto err_bad_offset; - } - 
fp = (struct flat_binder_object *)(t->buffer->data + *offp); - switch (fp->type) { - case BINDER_TYPE_BINDER: - case BINDER_TYPE_WEAK_BINDER: { - struct binder_ref *ref; - struct binder_node *node = binder_get_node(proc, fp->binder); - if (node == NULL) { - node = binder_new_node(proc, fp->binder, fp->cookie); - if (node == NULL) { - return_error = BR_FAILED_REPLY; - goto err_binder_new_node_failed; - } - node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; - node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); - } - if (fp->cookie != node->cookie) { - binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n", - proc->pid, thread->pid, - fp->binder, node->debug_id, - fp->cookie, node->cookie); - goto err_binder_get_ref_for_node_failed; - } - ref = binder_get_ref_for_node(target_proc, node); - if (ref == NULL) { - return_error = BR_FAILED_REPLY; - goto err_binder_get_ref_for_node_failed; - } - if (fp->type == BINDER_TYPE_BINDER) - fp->type = BINDER_TYPE_HANDLE; - else - fp->type = BINDER_TYPE_WEAK_HANDLE; - fp->handle = ref->desc; - binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, - &thread->todo); - - trace_binder_transaction_node_to_ref(t, node, ref); - binder_debug(BINDER_DEBUG_TRANSACTION, - " node %d u%p -> ref %d desc %d\n", - node->debug_id, node->ptr, ref->debug_id, - ref->desc); - } break; - case BINDER_TYPE_HANDLE: - case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, fp->handle); - if (ref == NULL) { - binder_user_error("%d:%d got transaction with invalid handle, %ld\n", - proc->pid, - thread->pid, fp->handle); - return_error = BR_FAILED_REPLY; - goto err_binder_get_ref_failed; - } - if (ref->node->proc == target_proc) { - if (fp->type == BINDER_TYPE_HANDLE) - fp->type = BINDER_TYPE_BINDER; - else - fp->type = BINDER_TYPE_WEAK_BINDER; - fp->binder = ref->node->ptr; - fp->cookie = ref->node->cookie; - binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); - trace_binder_transaction_ref_to_node(t, ref); - binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d -> node %d u%p\n", - ref->debug_id, ref->desc, ref->node->debug_id, - ref->node->ptr); - } else { - struct binder_ref *new_ref; - new_ref = binder_get_ref_for_node(target_proc, ref->node); - if (new_ref == NULL) { - return_error = BR_FAILED_REPLY; - goto err_binder_get_ref_for_node_failed; - } - fp->handle = new_ref->desc; - binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); - trace_binder_transaction_ref_to_ref(t, ref, - new_ref); - binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d -> ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, new_ref->debug_id, - new_ref->desc, ref->node->debug_id); - } - } break; - - case BINDER_TYPE_FD: { - int target_fd; - struct file *file; - - if (reply) { - if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { - binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n", - proc->pid, thread->pid, fp->handle); - return_error = BR_FAILED_REPLY; - goto err_fd_not_allowed; - } - } else if (!target_node->accept_fds) { - binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n", - proc->pid, thread->pid, fp->handle); - return_error = BR_FAILED_REPLY; - goto err_fd_not_allowed; - } - - file = fget(fp->handle); - if (file == NULL) { - binder_user_error("%d:%d got transaction with invalid fd, %ld\n", - proc->pid, thread->pid, fp->handle); - return_error = BR_FAILED_REPLY; - goto err_fget_failed; - } - target_fd = 
task_get_unused_fd_flags(target_proc, O_CLOEXEC); - if (target_fd < 0) { - fput(file); - return_error = BR_FAILED_REPLY; - goto err_get_unused_fd_failed; - } - task_fd_install(target_proc, target_fd, file); - trace_binder_transaction_fd(t, fp->handle, target_fd); - binder_debug(BINDER_DEBUG_TRANSACTION, - " fd %ld -> %d\n", fp->handle, target_fd); - /* TODO: fput? */ - fp->handle = target_fd; - } break; - - default: - binder_user_error("%d:%d got transaction with invalid object type, %lx\n", - proc->pid, thread->pid, fp->type); - return_error = BR_FAILED_REPLY; - goto err_bad_object_type; - } - } - if (reply) { - BUG_ON(t->buffer->async_transaction != 0); - binder_pop_transaction(target_thread, in_reply_to); - } else if (!(t->flags & TF_ONE_WAY)) { - BUG_ON(t->buffer->async_transaction != 0); - t->need_reply = 1; - t->from_parent = thread->transaction_stack; - thread->transaction_stack = t; - } else { - BUG_ON(target_node == NULL); - BUG_ON(t->buffer->async_transaction != 1); - if (target_node->has_async_transaction) { - target_list = &target_node->async_todo; - target_wait = NULL; - } else - target_node->has_async_transaction = 1; - } - t->work.type = BINDER_WORK_TRANSACTION; - list_add_tail(&t->work.entry, target_list); - tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - list_add_tail(&tcomplete->entry, &thread->todo); - if (target_wait) - wake_up_interruptible(target_wait); - return; - -err_get_unused_fd_failed: -err_fget_failed: -err_fd_not_allowed: -err_binder_get_ref_for_node_failed: -err_binder_get_ref_failed: -err_binder_new_node_failed: -err_bad_object_type: -err_bad_offset: -err_copy_data_failed: - trace_binder_transaction_failed_buffer_release(t->buffer); - binder_transaction_buffer_release(target_proc, t->buffer, offp); - t->buffer->transaction = NULL; - binder_free_buf(target_proc, t->buffer); -err_binder_alloc_buf_failed: - kfree(tcomplete); - binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); -err_alloc_tcomplete_failed: - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); -err_alloc_t_failed: -err_bad_call_stack: -err_empty_call_stack: -err_dead_binder: -err_invalid_target_handle: -err_no_context_mgr_node: - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d transaction failed %d, size %zd-%zd\n", - proc->pid, thread->pid, return_error, - tr->data_size, tr->offsets_size); - - { - struct binder_transaction_log_entry *fe; - fe = binder_transaction_log_add(&binder_transaction_log_failed); - *fe = *e; - } - - BUG_ON(thread->return_error != BR_OK); - if (in_reply_to) { - thread->return_error = BR_TRANSACTION_COMPLETE; - binder_send_failed_reply(in_reply_to, return_error); - } else - thread->return_error = return_error; -} - -int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, - void __user *buffer, int size, signed long *consumed) -{ - uint32_t cmd; - void __user *ptr = buffer + *consumed; - void __user *end = buffer + size; - - while (ptr < end && thread->return_error == BR_OK) { - if (get_user(cmd, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - trace_binder_command(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { - binder_stats.bc[_IOC_NR(cmd)]++; - proc->stats.bc[_IOC_NR(cmd)]++; - thread->stats.bc[_IOC_NR(cmd)]++; - } - switch (cmd) { - case BC_INCREFS: - case BC_ACQUIRE: - case BC_RELEASE: - case BC_DECREFS: { - uint32_t target; - struct binder_ref *ref; - const char *debug_string; - - if (get_user(target, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (target == 0 
&& binder_context_mgr_node && - (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { - ref = binder_get_ref_for_node(proc, - binder_context_mgr_node); - if (ref->desc != target) { - binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", - proc->pid, thread->pid, - ref->desc); - } - } else - ref = binder_get_ref(proc, target); - if (ref == NULL) { - binder_user_error("%d:%d refcount change on invalid ref %d\n", - proc->pid, thread->pid, target); - break; - } - switch (cmd) { - case BC_INCREFS: - debug_string = "IncRefs"; - binder_inc_ref(ref, 0, NULL); - break; - case BC_ACQUIRE: - debug_string = "Acquire"; - binder_inc_ref(ref, 1, NULL); - break; - case BC_RELEASE: - debug_string = "Release"; - binder_dec_ref(ref, 1); - break; - case BC_DECREFS: - default: - debug_string = "DecRefs"; - binder_dec_ref(ref, 0); - break; - } - binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s ref %d desc %d s %d w %d for node %d\n", - proc->pid, thread->pid, debug_string, ref->debug_id, - ref->desc, ref->strong, ref->weak, ref->node->debug_id); - break; - } - case BC_INCREFS_DONE: - case BC_ACQUIRE_DONE: { - void __user *node_ptr; - void *cookie; - struct binder_node *node; - - if (get_user(node_ptr, (void * __user *)ptr)) - return -EFAULT; - ptr += sizeof(void *); - if (get_user(cookie, (void * __user *)ptr)) - return -EFAULT; - ptr += sizeof(void *); - node = binder_get_node(proc, node_ptr); - if (node == NULL) { - binder_user_error("%d:%d %s u%p no match\n", - proc->pid, thread->pid, - cmd == BC_INCREFS_DONE ? - "BC_INCREFS_DONE" : - "BC_ACQUIRE_DONE", - node_ptr); - break; - } - if (cookie != node->cookie) { - binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n", - proc->pid, thread->pid, - cmd == BC_INCREFS_DONE ? - "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", - node_ptr, node->debug_id, - cookie, node->cookie); - break; - } - if (cmd == BC_ACQUIRE_DONE) { - if (node->pending_strong_ref == 0) { - binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", - proc->pid, thread->pid, - node->debug_id); - break; - } - node->pending_strong_ref = 0; - } else { - if (node->pending_weak_ref == 0) { - binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", - proc->pid, thread->pid, - node->debug_id); - break; - } - node->pending_weak_ref = 0; - } - binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); - binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s node %d ls %d lw %d\n", - proc->pid, thread->pid, - cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", - node->debug_id, node->local_strong_refs, node->local_weak_refs); - break; - } - case BC_ATTEMPT_ACQUIRE: - pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); - return -EINVAL; - case BC_ACQUIRE_RESULT: - pr_err("BC_ACQUIRE_RESULT not supported\n"); - return -EINVAL; - - case BC_FREE_BUFFER: { - void __user *data_ptr; - struct binder_buffer *buffer; - - if (get_user(data_ptr, (void * __user *)ptr)) - return -EFAULT; - ptr += sizeof(void *); - - buffer = binder_buffer_lookup(proc, data_ptr); - if (buffer == NULL) { - binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n", - proc->pid, thread->pid, data_ptr); - break; - } - if (!buffer->allow_user_free) { - binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n", - proc->pid, thread->pid, data_ptr); - break; - } - binder_debug(BINDER_DEBUG_FREE_BUFFER, - "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", - proc->pid, thread->pid, data_ptr, buffer->debug_id, - buffer->transaction ? 
"active" : "finished"); - - if (buffer->transaction) { - buffer->transaction->buffer = NULL; - buffer->transaction = NULL; - } - if (buffer->async_transaction && buffer->target_node) { - BUG_ON(!buffer->target_node->has_async_transaction); - if (list_empty(&buffer->target_node->async_todo)) - buffer->target_node->has_async_transaction = 0; - else - list_move_tail(buffer->target_node->async_todo.next, &thread->todo); - } - trace_binder_transaction_buffer_release(buffer); - binder_transaction_buffer_release(proc, buffer, NULL); - binder_free_buf(proc, buffer); - break; - } - - case BC_TRANSACTION: - case BC_REPLY: { - struct binder_transaction_data tr; - - if (copy_from_user(&tr, ptr, sizeof(tr))) - return -EFAULT; - ptr += sizeof(tr); - binder_transaction(proc, thread, &tr, cmd == BC_REPLY); - break; - } - - case BC_REGISTER_LOOPER: - binder_debug(BINDER_DEBUG_THREADS, - "%d:%d BC_REGISTER_LOOPER\n", - proc->pid, thread->pid); - if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { - thread->looper |= BINDER_LOOPER_STATE_INVALID; - binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", - proc->pid, thread->pid); - } else if (proc->requested_threads == 0) { - thread->looper |= BINDER_LOOPER_STATE_INVALID; - binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", - proc->pid, thread->pid); - } else { - proc->requested_threads--; - proc->requested_threads_started++; - } - thread->looper |= BINDER_LOOPER_STATE_REGISTERED; - break; - case BC_ENTER_LOOPER: - binder_debug(BINDER_DEBUG_THREADS, - "%d:%d BC_ENTER_LOOPER\n", - proc->pid, thread->pid); - if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { - thread->looper |= BINDER_LOOPER_STATE_INVALID; - binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", - proc->pid, thread->pid); - } - thread->looper |= BINDER_LOOPER_STATE_ENTERED; - break; - case BC_EXIT_LOOPER: - binder_debug(BINDER_DEBUG_THREADS, - "%d:%d BC_EXIT_LOOPER\n", - proc->pid, thread->pid); - thread->looper |= BINDER_LOOPER_STATE_EXITED; - break; - - case BC_REQUEST_DEATH_NOTIFICATION: - case BC_CLEAR_DEATH_NOTIFICATION: { - uint32_t target; - void __user *cookie; - struct binder_ref *ref; - struct binder_ref_death *death; - - if (get_user(target, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (get_user(cookie, (void __user * __user *)ptr)) - return -EFAULT; - ptr += sizeof(void *); - ref = binder_get_ref(proc, target); - if (ref == NULL) { - binder_user_error("%d:%d %s invalid ref %d\n", - proc->pid, thread->pid, - cmd == BC_REQUEST_DEATH_NOTIFICATION ? - "BC_REQUEST_DEATH_NOTIFICATION" : - "BC_CLEAR_DEATH_NOTIFICATION", - target); - break; - } - - binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, - "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n", - proc->pid, thread->pid, - cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
- "BC_REQUEST_DEATH_NOTIFICATION" : - "BC_CLEAR_DEATH_NOTIFICATION", - cookie, ref->debug_id, ref->desc, - ref->strong, ref->weak, ref->node->debug_id); - - if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { - if (ref->death) { - binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", - proc->pid, thread->pid); - break; - } - death = kzalloc(sizeof(*death), GFP_KERNEL); - if (death == NULL) { - thread->return_error = BR_ERROR; - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", - proc->pid, thread->pid); - break; - } - binder_stats_created(BINDER_STAT_DEATH); - INIT_LIST_HEAD(&death->work.entry); - death->cookie = cookie; - ref->death = death; - if (ref->node->proc == NULL) { - ref->death->work.type = BINDER_WORK_DEAD_BINDER; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&ref->death->work.entry, &thread->todo); - } else { - list_add_tail(&ref->death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); - } - } - } else { - if (ref->death == NULL) { - binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", - proc->pid, thread->pid); - break; - } - death = ref->death; - if (death->cookie != cookie) { - binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n", - proc->pid, thread->pid, - death->cookie, cookie); - break; - } - ref->death = NULL; - if (list_empty(&death->work.entry)) { - death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&death->work.entry, &thread->todo); - } else { - list_add_tail(&death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); - } - } else { - BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); - death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; - } - } - } break; - case BC_DEAD_BINDER_DONE: { - struct binder_work *w; - void __user *cookie; - struct binder_ref_death *death = NULL; - if (get_user(cookie, (void __user * __user *)ptr)) - return -EFAULT; - - ptr += sizeof(void *); - list_for_each_entry(w, &proc->delivered_death, entry) { - struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); - if (tmp_death->cookie == cookie) { - death = tmp_death; - break; - } - } - binder_debug(BINDER_DEBUG_DEAD_BINDER, - "%d:%d BC_DEAD_BINDER_DONE %p found %p\n", - proc->pid, thread->pid, cookie, death); - if (death == NULL) { - binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n", - proc->pid, thread->pid, cookie); - break; - } - - list_del_init(&death->work.entry); - if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { - death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&death->work.entry, &thread->todo); - } else { - list_add_tail(&death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); - } - } - } break; - - default: - pr_err("%d:%d unknown command %d\n", - proc->pid, thread->pid, cmd); - return -EINVAL; - } - *consumed = ptr - buffer; - } - return 0; -} - -void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, - uint32_t cmd) -{ - trace_binder_return(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { - binder_stats.br[_IOC_NR(cmd)]++; - proc->stats.br[_IOC_NR(cmd)]++; - thread->stats.br[_IOC_NR(cmd)]++; - } -} - -static int 
binder_has_proc_work(struct binder_proc *proc, - struct binder_thread *thread) -{ - return !list_empty(&proc->todo) || - (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); -} - -static int binder_has_thread_work(struct binder_thread *thread) -{ - return !list_empty(&thread->todo) || thread->return_error != BR_OK || - (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); -} - -static int binder_thread_read(struct binder_proc *proc, - struct binder_thread *thread, - void __user *buffer, int size, - signed long *consumed, int non_block) -{ - void __user *ptr = buffer + *consumed; - void __user *end = buffer + size; - - int ret = 0; - int wait_for_proc_work; - - if (*consumed == 0) { - if (put_user(BR_NOOP, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - } - -retry: - wait_for_proc_work = thread->transaction_stack == NULL && - list_empty(&thread->todo); - - if (thread->return_error != BR_OK && ptr < end) { - if (thread->return_error2 != BR_OK) { - if (put_user(thread->return_error2, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, thread->return_error2); - if (ptr == end) - goto done; - thread->return_error2 = BR_OK; - } - if (put_user(thread->return_error, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, thread->return_error); - thread->return_error = BR_OK; - goto done; - } - - - thread->looper |= BINDER_LOOPER_STATE_WAITING; - if (wait_for_proc_work) - proc->ready_threads++; - - binder_unlock(__func__); - - trace_binder_wait_for_work(wait_for_proc_work, - !!thread->transaction_stack, - !list_empty(&thread->todo)); - if (wait_for_proc_work) { - if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | - BINDER_LOOPER_STATE_ENTERED))) { - binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", - proc->pid, thread->pid, thread->looper); - wait_event_interruptible(binder_user_error_wait, - binder_stop_on_user_error < 2); - } - binder_set_nice(proc->default_priority); - if (non_block) { - if (!binder_has_proc_work(proc, thread)) - ret = -EAGAIN; - } else - ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)); - } else { - if (non_block) { - if (!binder_has_thread_work(thread)) - ret = -EAGAIN; - } else - ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)); - } - - binder_lock(__func__); - - if (wait_for_proc_work) - proc->ready_threads--; - thread->looper &= ~BINDER_LOOPER_STATE_WAITING; - - if (ret) - return ret; - - while (1) { - uint32_t cmd; - struct binder_transaction_data tr; - struct binder_work *w; - struct binder_transaction *t = NULL; - - if (!list_empty(&thread->todo)) - w = list_first_entry(&thread->todo, struct binder_work, entry); - else if (!list_empty(&proc->todo) && wait_for_proc_work) - w = list_first_entry(&proc->todo, struct binder_work, entry); - else { - if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ - goto retry; - break; - } - - if (end - ptr < sizeof(tr) + 4) - break; - - switch (w->type) { - case BINDER_WORK_TRANSACTION: { - t = container_of(w, struct binder_transaction, work); - } break; - case BINDER_WORK_TRANSACTION_COMPLETE: { - cmd = BR_TRANSACTION_COMPLETE; - if (put_user(cmd, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - - binder_stat_br(proc, thread, cmd); - binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, - "%d:%d 
BR_TRANSACTION_COMPLETE\n", - proc->pid, thread->pid); - - list_del(&w->entry); - kfree(w); - binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); - } break; - case BINDER_WORK_NODE: { - struct binder_node *node = container_of(w, struct binder_node, work); - uint32_t cmd = BR_NOOP; - const char *cmd_name; - int strong = node->internal_strong_refs || node->local_strong_refs; - int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; - if (weak && !node->has_weak_ref) { - cmd = BR_INCREFS; - cmd_name = "BR_INCREFS"; - node->has_weak_ref = 1; - node->pending_weak_ref = 1; - node->local_weak_refs++; - } else if (strong && !node->has_strong_ref) { - cmd = BR_ACQUIRE; - cmd_name = "BR_ACQUIRE"; - node->has_strong_ref = 1; - node->pending_strong_ref = 1; - node->local_strong_refs++; - } else if (!strong && node->has_strong_ref) { - cmd = BR_RELEASE; - cmd_name = "BR_RELEASE"; - node->has_strong_ref = 0; - } else if (!weak && node->has_weak_ref) { - cmd = BR_DECREFS; - cmd_name = "BR_DECREFS"; - node->has_weak_ref = 0; - } - if (cmd != BR_NOOP) { - if (put_user(cmd, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (put_user(node->ptr, (void * __user *)ptr)) - return -EFAULT; - ptr += sizeof(void *); - if (put_user(node->cookie, (void * __user *)ptr)) - return -EFAULT; - ptr += sizeof(void *); - - binder_stat_br(proc, thread, cmd); - binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s %d u%p c%p\n", - proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); - } else { - list_del_init(&w->entry); - if (!weak && !strong) { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d:%d node %d u%p c%p deleted\n", - proc->pid, thread->pid, node->debug_id, - node->ptr, node->cookie); - rb_erase(&node->rb_node, &proc->nodes); - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); - } else { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d:%d node %d u%p c%p state unchanged\n", - proc->pid, thread->pid, node->debug_id, node->ptr, - node->cookie); - } - } - } break; - case BINDER_WORK_DEAD_BINDER: - case BINDER_WORK_DEAD_BINDER_AND_CLEAR: - case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { - struct binder_ref_death *death; - uint32_t cmd; - - death = container_of(w, struct binder_ref_death, work); - if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) - cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; - else - cmd = BR_DEAD_BINDER; - if (put_user(cmd, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (put_user(death->cookie, (void * __user *)ptr)) - return -EFAULT; - ptr += sizeof(void *); - binder_stat_br(proc, thread, cmd); - binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, - "%d:%d %s %p\n", - proc->pid, thread->pid, - cmd == BR_DEAD_BINDER ? 
- "BR_DEAD_BINDER" : - "BR_CLEAR_DEATH_NOTIFICATION_DONE", - death->cookie); - - if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { - list_del(&w->entry); - kfree(death); - binder_stats_deleted(BINDER_STAT_DEATH); - } else - list_move(&w->entry, &proc->delivered_death); - if (cmd == BR_DEAD_BINDER) - goto done; /* DEAD_BINDER notifications can cause transactions */ - } break; - } - - if (!t) - continue; - - BUG_ON(t->buffer == NULL); - if (t->buffer->target_node) { - struct binder_node *target_node = t->buffer->target_node; - tr.target.ptr = target_node->ptr; - tr.cookie = target_node->cookie; - t->saved_priority = task_nice(current); - if (t->priority < target_node->min_priority && - !(t->flags & TF_ONE_WAY)) - binder_set_nice(t->priority); - else if (!(t->flags & TF_ONE_WAY) || - t->saved_priority > target_node->min_priority) - binder_set_nice(target_node->min_priority); - cmd = BR_TRANSACTION; - } else { - tr.target.ptr = NULL; - tr.cookie = NULL; - cmd = BR_REPLY; - } - tr.code = t->code; - tr.flags = t->flags; - tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); - - if (t->from) { - struct task_struct *sender = t->from->proc->tsk; - tr.sender_pid = task_tgid_nr_ns(sender, - task_active_pid_ns(current)); - } else { - tr.sender_pid = 0; - } - - tr.data_size = t->buffer->data_size; - tr.offsets_size = t->buffer->offsets_size; - tr.data.ptr.buffer = (void *)t->buffer->data + - proc->user_buffer_offset; - tr.data.ptr.offsets = tr.data.ptr.buffer + - ALIGN(t->buffer->data_size, - sizeof(void *)); - - if (put_user(cmd, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (copy_to_user(ptr, &tr, sizeof(tr))) - return -EFAULT; - ptr += sizeof(tr); - - trace_binder_transaction_received(t); - binder_stat_br(proc, thread, cmd); - binder_debug(BINDER_DEBUG_TRANSACTION, - "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n", - proc->pid, thread->pid, - (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : - "BR_REPLY", - t->debug_id, t->from ? t->from->proc->pid : 0, - t->from ? 
t->from->pid : 0, cmd, - t->buffer->data_size, t->buffer->offsets_size, - tr.data.ptr.buffer, tr.data.ptr.offsets); - - list_del(&t->work.entry); - t->buffer->allow_user_free = 1; - if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { - t->to_parent = thread->transaction_stack; - t->to_thread = thread; - thread->transaction_stack = t; - } else { - t->buffer->transaction = NULL; - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); - } - break; - } - -done: - - *consumed = ptr - buffer; - if (proc->requested_threads + proc->ready_threads == 0 && - proc->requested_threads_started < proc->max_threads && - (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | - BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ - /*spawn a new thread if we leave this out */) { - proc->requested_threads++; - binder_debug(BINDER_DEBUG_THREADS, - "%d:%d BR_SPAWN_LOOPER\n", - proc->pid, thread->pid); - if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) - return -EFAULT; - binder_stat_br(proc, thread, BR_SPAWN_LOOPER); - } - return 0; -} - -static void binder_release_work(struct list_head *list) -{ - struct binder_work *w; - while (!list_empty(list)) { - w = list_first_entry(list, struct binder_work, entry); - list_del_init(&w->entry); - switch (w->type) { - case BINDER_WORK_TRANSACTION: { - struct binder_transaction *t; - - t = container_of(w, struct binder_transaction, work); - if (t->buffer->target_node && - !(t->flags & TF_ONE_WAY)) { - binder_send_failed_reply(t, BR_DEAD_REPLY); - } else { - binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, - "undelivered transaction %d\n", - t->debug_id); - t->buffer->transaction = NULL; - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); - } - } break; - case BINDER_WORK_TRANSACTION_COMPLETE: { - binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, - "undelivered TRANSACTION_COMPLETE\n"); - kfree(w); - binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); - } break; - case BINDER_WORK_DEAD_BINDER_AND_CLEAR: - case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { - struct binder_ref_death *death; - - death = container_of(w, struct binder_ref_death, work); - binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, - "undelivered death notification, %p\n", - death->cookie); - kfree(death); - binder_stats_deleted(BINDER_STAT_DEATH); - } break; - default: - pr_err("unexpected work type, %d, not freed\n", - w->type); - break; - } - } - -} - -static struct binder_thread *binder_get_thread(struct binder_proc *proc) -{ - struct binder_thread *thread = NULL; - struct rb_node *parent = NULL; - struct rb_node **p = &proc->threads.rb_node; - - while (*p) { - parent = *p; - thread = rb_entry(parent, struct binder_thread, rb_node); - - if (current->pid < thread->pid) - p = &(*p)->rb_left; - else if (current->pid > thread->pid) - p = &(*p)->rb_right; - else - break; - } - if (*p == NULL) { - thread = kzalloc(sizeof(*thread), GFP_KERNEL); - if (thread == NULL) - return NULL; - binder_stats_created(BINDER_STAT_THREAD); - thread->proc = proc; - thread->pid = current->pid; - init_waitqueue_head(&thread->wait); - INIT_LIST_HEAD(&thread->todo); - rb_link_node(&thread->rb_node, parent, p); - rb_insert_color(&thread->rb_node, &proc->threads); - thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; - thread->return_error = BR_OK; - thread->return_error2 = BR_OK; - } - return thread; -} - -static int binder_free_thread(struct binder_proc *proc, - struct binder_thread *thread) -{ - struct binder_transaction *t; - struct binder_transaction *send_reply = NULL; - int active_transactions = 0; - - 
rb_erase(&thread->rb_node, &proc->threads); - t = thread->transaction_stack; - if (t && t->to_thread == thread) - send_reply = t; - while (t) { - active_transactions++; - binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, - "release %d:%d transaction %d %s, still active\n", - proc->pid, thread->pid, - t->debug_id, - (t->to_thread == thread) ? "in" : "out"); - - if (t->to_thread == thread) { - t->to_proc = NULL; - t->to_thread = NULL; - if (t->buffer) { - t->buffer->transaction = NULL; - t->buffer = NULL; - } - t = t->to_parent; - } else if (t->from == thread) { - t->from = NULL; - t = t->from_parent; - } else - BUG(); - } - if (send_reply) - binder_send_failed_reply(send_reply, BR_DEAD_REPLY); - binder_release_work(&thread->todo); - kfree(thread); - binder_stats_deleted(BINDER_STAT_THREAD); - return active_transactions; -} - -static unsigned int binder_poll(struct file *filp, - struct poll_table_struct *wait) -{ - struct binder_proc *proc = filp->private_data; - struct binder_thread *thread = NULL; - int wait_for_proc_work; - - binder_lock(__func__); - - thread = binder_get_thread(proc); - - wait_for_proc_work = thread->transaction_stack == NULL && - list_empty(&thread->todo) && thread->return_error == BR_OK; - - binder_unlock(__func__); - - if (wait_for_proc_work) { - if (binder_has_proc_work(proc, thread)) - return POLLIN; - poll_wait(filp, &proc->wait, wait); - if (binder_has_proc_work(proc, thread)) - return POLLIN; - } else { - if (binder_has_thread_work(thread)) - return POLLIN; - poll_wait(filp, &thread->wait, wait); - if (binder_has_thread_work(thread)) - return POLLIN; - } - return 0; -} - -static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int ret; - struct binder_proc *proc = filp->private_data; - struct binder_thread *thread; - unsigned int size = _IOC_SIZE(cmd); - void __user *ubuf = (void __user *)arg; - - /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ - - trace_binder_ioctl(cmd, arg); - - ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); - if (ret) - goto err_unlocked; - - binder_lock(__func__); - thread = binder_get_thread(proc); - if (thread == NULL) { - ret = -ENOMEM; - goto err; - } - - switch (cmd) { - case BINDER_WRITE_READ: { - struct binder_write_read bwr; - if (size != sizeof(struct binder_write_read)) { - ret = -EINVAL; - goto err; - } - if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { - ret = -EFAULT; - goto err; - } - binder_debug(BINDER_DEBUG_READ_WRITE, - "%d:%d write %ld at %08lx, read %ld at %08lx\n", - proc->pid, thread->pid, bwr.write_size, - bwr.write_buffer, bwr.read_size, bwr.read_buffer); - - if (bwr.write_size > 0) { - ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); - trace_binder_write_done(ret); - if (ret < 0) { - bwr.read_consumed = 0; - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) - ret = -EFAULT; - goto err; - } - } - if (bwr.read_size > 0) { - ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); - trace_binder_read_done(ret); - if (!list_empty(&proc->todo)) - wake_up_interruptible(&proc->wait); - if (ret < 0) { - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) - ret = -EFAULT; - goto err; - } - } - binder_debug(BINDER_DEBUG_READ_WRITE, - "%d:%d wrote %ld of %ld, read return %ld of %ld\n", - proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, - bwr.read_consumed, bwr.read_size); - if (copy_to_user(ubuf, &bwr, 
sizeof(bwr))) { - ret = -EFAULT; - goto err; - } - break; - } - case BINDER_SET_MAX_THREADS: - if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { - ret = -EINVAL; - goto err; - } - break; - case BINDER_SET_CONTEXT_MGR: - if (binder_context_mgr_node != NULL) { - pr_err("BINDER_SET_CONTEXT_MGR already set\n"); - ret = -EBUSY; - goto err; - } - if (uid_valid(binder_context_mgr_uid)) { - if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) { - pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", - from_kuid(&init_user_ns, current->cred->euid), - from_kuid(&init_user_ns, binder_context_mgr_uid)); - ret = -EPERM; - goto err; - } - } else - binder_context_mgr_uid = current->cred->euid; - binder_context_mgr_node = binder_new_node(proc, NULL, NULL); - if (binder_context_mgr_node == NULL) { - ret = -ENOMEM; - goto err; - } - binder_context_mgr_node->local_weak_refs++; - binder_context_mgr_node->local_strong_refs++; - binder_context_mgr_node->has_strong_ref = 1; - binder_context_mgr_node->has_weak_ref = 1; - break; - case BINDER_THREAD_EXIT: - binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", - proc->pid, thread->pid); - binder_free_thread(proc, thread); - thread = NULL; - break; - case BINDER_VERSION: - if (size != sizeof(struct binder_version)) { - ret = -EINVAL; - goto err; - } - if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { - ret = -EINVAL; - goto err; - } - break; - default: - ret = -EINVAL; - goto err; - } - ret = 0; -err: - if (thread) - thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; - binder_unlock(__func__); - wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); - if (ret && ret != -ERESTARTSYS) - pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); -err_unlocked: - trace_binder_ioctl_done(ret); - return ret; -} - -static void binder_vma_open(struct vm_area_struct *vma) -{ - struct binder_proc *proc = vma->vm_private_data; - binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", - proc->pid, vma->vm_start, vma->vm_end, - (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, - (unsigned long)pgprot_val(vma->vm_page_prot)); -} - -static void binder_vma_close(struct vm_area_struct *vma) -{ - struct binder_proc *proc = vma->vm_private_data; - binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", - proc->pid, vma->vm_start, vma->vm_end, - (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, - (unsigned long)pgprot_val(vma->vm_page_prot)); - proc->vma = NULL; - proc->vma_vm_mm = NULL; - binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); -} - -static struct vm_operations_struct binder_vm_ops = { - .open = binder_vma_open, - .close = binder_vma_close, -}; - -static int binder_mmap(struct file *filp, struct vm_area_struct *vma) -{ - int ret; - struct vm_struct *area; - struct binder_proc *proc = filp->private_data; - const char *failure_string; - struct binder_buffer *buffer; - - if (proc->tsk != current) - return -EINVAL; - - if ((vma->vm_end - vma->vm_start) > SZ_4M) - vma->vm_end = vma->vm_start + SZ_4M; - - binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", - proc->pid, vma->vm_start, vma->vm_end, - (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, - (unsigned long)pgprot_val(vma->vm_page_prot)); - - if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { - ret = -EPERM; - failure_string = "bad vm_flags"; - goto err_bad_arg; - } - 
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; - - mutex_lock(&binder_mmap_lock); - if (proc->buffer) { - ret = -EBUSY; - failure_string = "already mapped"; - goto err_already_mapped; - } - - area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); - if (area == NULL) { - ret = -ENOMEM; - failure_string = "get_vm_area"; - goto err_get_vm_area_failed; - } - proc->buffer = area->addr; - proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; - mutex_unlock(&binder_mmap_lock); - -#ifdef CONFIG_CPU_CACHE_VIPT - if (cache_is_vipt_aliasing()) { - while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { - pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); - vma->vm_start += PAGE_SIZE; - } - } -#endif - proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); - if (proc->pages == NULL) { - ret = -ENOMEM; - failure_string = "alloc page array"; - goto err_alloc_pages_failed; - } - proc->buffer_size = vma->vm_end - vma->vm_start; - - vma->vm_ops = &binder_vm_ops; - vma->vm_private_data = proc; - - if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { - ret = -ENOMEM; - failure_string = "alloc small buf"; - goto err_alloc_small_buf_failed; - } - buffer = proc->buffer; - INIT_LIST_HEAD(&proc->buffers); - list_add(&buffer->entry, &proc->buffers); - buffer->free = 1; - binder_insert_free_buffer(proc, buffer); - proc->free_async_space = proc->buffer_size / 2; - barrier(); - proc->files = get_files_struct(current); - proc->vma = vma; - proc->vma_vm_mm = vma->vm_mm; - - /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", - proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ - return 0; - -err_alloc_small_buf_failed: - kfree(proc->pages); - proc->pages = NULL; -err_alloc_pages_failed: - mutex_lock(&binder_mmap_lock); - vfree(proc->buffer); - proc->buffer = NULL; -err_get_vm_area_failed: -err_already_mapped: - mutex_unlock(&binder_mmap_lock); -err_bad_arg: - pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", - proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); - return ret; -} - -static int binder_open(struct inode *nodp, struct file *filp) -{ - struct binder_proc *proc; - - binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", - current->group_leader->pid, current->pid); - - proc = kzalloc(sizeof(*proc), GFP_KERNEL); - if (proc == NULL) - return -ENOMEM; - get_task_struct(current); - proc->tsk = current; - INIT_LIST_HEAD(&proc->todo); - init_waitqueue_head(&proc->wait); - proc->default_priority = task_nice(current); - - binder_lock(__func__); - - binder_stats_created(BINDER_STAT_PROC); - hlist_add_head(&proc->proc_node, &binder_procs); - proc->pid = current->group_leader->pid; - INIT_LIST_HEAD(&proc->delivered_death); - filp->private_data = proc; - - binder_unlock(__func__); - - if (binder_debugfs_dir_entry_proc) { - char strbuf[11]; - snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); - proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, - binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); - } - - return 0; -} - -static int binder_flush(struct file *filp, fl_owner_t id) -{ - struct binder_proc *proc = filp->private_data; - - binder_defer_work(proc, BINDER_DEFERRED_FLUSH); - - return 0; -} - -static void binder_deferred_flush(struct binder_proc *proc) -{ - struct rb_node *n; - int wake_count = 0; - for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { - struct binder_thread *thread = 
rb_entry(n, struct binder_thread, rb_node); - thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; - if (thread->looper & BINDER_LOOPER_STATE_WAITING) { - wake_up_interruptible(&thread->wait); - wake_count++; - } - } - wake_up_interruptible_all(&proc->wait); - - binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "binder_flush: %d woke %d threads\n", proc->pid, - wake_count); -} - -static int binder_release(struct inode *nodp, struct file *filp) -{ - struct binder_proc *proc = filp->private_data; - debugfs_remove(proc->debugfs_entry); - binder_defer_work(proc, BINDER_DEFERRED_RELEASE); - - return 0; -} - -static int binder_node_release(struct binder_node *node, int refs) -{ - struct binder_ref *ref; - int death = 0; - - list_del_init(&node->work.entry); - binder_release_work(&node->async_todo); - - if (hlist_empty(&node->refs)) { - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); - - return refs; - } - - node->proc = NULL; - node->local_strong_refs = 0; - node->local_weak_refs = 0; - hlist_add_head(&node->dead_node, &binder_dead_nodes); - - hlist_for_each_entry(ref, &node->refs, node_entry) { - refs++; - - if (!ref->death) - continue; - - death++; - - if (list_empty(&ref->death->work.entry)) { - ref->death->work.type = BINDER_WORK_DEAD_BINDER; - list_add_tail(&ref->death->work.entry, - &ref->proc->todo); - wake_up_interruptible(&ref->proc->wait); - } else - BUG(); - } - - binder_debug(BINDER_DEBUG_DEAD_BINDER, - "node %d now dead, refs %d, death %d\n", - node->debug_id, refs, death); - - return refs; -} - -static void binder_deferred_release(struct binder_proc *proc) -{ - struct binder_transaction *t; - struct rb_node *n; - int threads, nodes, incoming_refs, outgoing_refs, buffers, - active_transactions, page_count; - - BUG_ON(proc->vma); - BUG_ON(proc->files); - - hlist_del(&proc->proc_node); - - if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { - binder_debug(BINDER_DEBUG_DEAD_BINDER, - "%s: %d context_mgr_node gone\n", - __func__, proc->pid); - binder_context_mgr_node = NULL; - } - - threads = 0; - active_transactions = 0; - while ((n = rb_first(&proc->threads))) { - struct binder_thread *thread; - - thread = rb_entry(n, struct binder_thread, rb_node); - threads++; - active_transactions += binder_free_thread(proc, thread); - } - - nodes = 0; - incoming_refs = 0; - while ((n = rb_first(&proc->nodes))) { - struct binder_node *node; - - node = rb_entry(n, struct binder_node, rb_node); - nodes++; - rb_erase(&node->rb_node, &proc->nodes); - incoming_refs = binder_node_release(node, incoming_refs); - } - - outgoing_refs = 0; - while ((n = rb_first(&proc->refs_by_desc))) { - struct binder_ref *ref; - - ref = rb_entry(n, struct binder_ref, rb_node_desc); - outgoing_refs++; - binder_delete_ref(ref); - } - - binder_release_work(&proc->todo); - binder_release_work(&proc->delivered_death); - - buffers = 0; - while ((n = rb_first(&proc->allocated_buffers))) { - struct binder_buffer *buffer; - - buffer = rb_entry(n, struct binder_buffer, rb_node); - - t = buffer->transaction; - if (t) { - t->buffer = NULL; - buffer->transaction = NULL; - pr_err("release proc %d, transaction %d, not freed\n", - proc->pid, t->debug_id); - /*BUG();*/ - } - - binder_free_buf(proc, buffer); - buffers++; - } - - binder_stats_deleted(BINDER_STAT_PROC); - - page_count = 0; - if (proc->pages) { - int i; - - for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { - void *page_addr; - - if (!proc->pages[i]) - continue; - - page_addr = proc->buffer + i * PAGE_SIZE; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%s: %d: 
page %d at %p not freed\n", - __func__, proc->pid, i, page_addr); - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); - __free_page(proc->pages[i]); - page_count++; - } - kfree(proc->pages); - vfree(proc->buffer); - } - - put_task_struct(proc->tsk); - - binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", - __func__, proc->pid, threads, nodes, incoming_refs, - outgoing_refs, active_transactions, buffers, page_count); - - kfree(proc); -} - -static void binder_deferred_func(struct work_struct *work) -{ - struct binder_proc *proc; - struct files_struct *files; - - int defer; - do { - binder_lock(__func__); - mutex_lock(&binder_deferred_lock); - if (!hlist_empty(&binder_deferred_list)) { - proc = hlist_entry(binder_deferred_list.first, - struct binder_proc, deferred_work_node); - hlist_del_init(&proc->deferred_work_node); - defer = proc->deferred_work; - proc->deferred_work = 0; - } else { - proc = NULL; - defer = 0; - } - mutex_unlock(&binder_deferred_lock); - - files = NULL; - if (defer & BINDER_DEFERRED_PUT_FILES) { - files = proc->files; - if (files) - proc->files = NULL; - } - - if (defer & BINDER_DEFERRED_FLUSH) - binder_deferred_flush(proc); - - if (defer & BINDER_DEFERRED_RELEASE) - binder_deferred_release(proc); /* frees proc */ - - binder_unlock(__func__); - if (files) - put_files_struct(files); - } while (proc); -} -static DECLARE_WORK(binder_deferred_work, binder_deferred_func); - -static void -binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) -{ - mutex_lock(&binder_deferred_lock); - proc->deferred_work |= defer; - if (hlist_unhashed(&proc->deferred_work_node)) { - hlist_add_head(&proc->deferred_work_node, - &binder_deferred_list); - queue_work(binder_deferred_workqueue, &binder_deferred_work); - } - mutex_unlock(&binder_deferred_lock); -} - -static void print_binder_transaction(struct seq_file *m, const char *prefix, - struct binder_transaction *t) -{ - seq_printf(m, - "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", - prefix, t->debug_id, t, - t->from ? t->from->proc->pid : 0, - t->from ? t->from->pid : 0, - t->to_proc ? t->to_proc->pid : 0, - t->to_thread ? t->to_thread->pid : 0, - t->code, t->flags, t->priority, t->need_reply); - if (t->buffer == NULL) { - seq_puts(m, " buffer free\n"); - return; - } - if (t->buffer->target_node) - seq_printf(m, " node %d", - t->buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %p\n", - t->buffer->data_size, t->buffer->offsets_size, - t->buffer->data); -} - -static void print_binder_buffer(struct seq_file *m, const char *prefix, - struct binder_buffer *buffer) -{ - seq_printf(m, "%s %d: %p size %zd:%zd %s\n", - prefix, buffer->debug_id, buffer->data, - buffer->data_size, buffer->offsets_size, - buffer->transaction ? 
"active" : "delivered"); -} - -static void print_binder_work(struct seq_file *m, const char *prefix, - const char *transaction_prefix, - struct binder_work *w) -{ - struct binder_node *node; - struct binder_transaction *t; - - switch (w->type) { - case BINDER_WORK_TRANSACTION: - t = container_of(w, struct binder_transaction, work); - print_binder_transaction(m, transaction_prefix, t); - break; - case BINDER_WORK_TRANSACTION_COMPLETE: - seq_printf(m, "%stransaction complete\n", prefix); - break; - case BINDER_WORK_NODE: - node = container_of(w, struct binder_node, work); - seq_printf(m, "%snode work %d: u%p c%p\n", - prefix, node->debug_id, node->ptr, node->cookie); - break; - case BINDER_WORK_DEAD_BINDER: - seq_printf(m, "%shas dead binder\n", prefix); - break; - case BINDER_WORK_DEAD_BINDER_AND_CLEAR: - seq_printf(m, "%shas cleared dead binder\n", prefix); - break; - case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: - seq_printf(m, "%shas cleared death notification\n", prefix); - break; - default: - seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); - break; - } -} - -static void print_binder_thread(struct seq_file *m, - struct binder_thread *thread, - int print_always) -{ - struct binder_transaction *t; - struct binder_work *w; - size_t start_pos = m->count; - size_t header_pos; - - seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); - header_pos = m->count; - t = thread->transaction_stack; - while (t) { - if (t->from == thread) { - print_binder_transaction(m, - " outgoing transaction", t); - t = t->from_parent; - } else if (t->to_thread == thread) { - print_binder_transaction(m, - " incoming transaction", t); - t = t->to_parent; - } else { - print_binder_transaction(m, " bad transaction", t); - t = NULL; - } - } - list_for_each_entry(w, &thread->todo, entry) { - print_binder_work(m, " ", " pending transaction", w); - } - if (!print_always && m->count == header_pos) - m->count = start_pos; -} - -static void print_binder_node(struct seq_file *m, struct binder_node *node) -{ - struct binder_ref *ref; - struct binder_work *w; - int count; - - count = 0; - hlist_for_each_entry(ref, &node->refs, node_entry) - count++; - - seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", - node->debug_id, node->ptr, node->cookie, - node->has_strong_ref, node->has_weak_ref, - node->local_strong_refs, node->local_weak_refs, - node->internal_strong_refs, count); - if (count) { - seq_puts(m, " proc"); - hlist_for_each_entry(ref, &node->refs, node_entry) - seq_printf(m, " %d", ref->proc->pid); - } - seq_puts(m, "\n"); - list_for_each_entry(w, &node->async_todo, entry) - print_binder_work(m, " ", - " pending async transaction", w); -} - -static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) -{ - seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", - ref->debug_id, ref->desc, ref->node->proc ? 
"" : "dead ", - ref->node->debug_id, ref->strong, ref->weak, ref->death); -} - -static void print_binder_proc(struct seq_file *m, - struct binder_proc *proc, int print_all) -{ - struct binder_work *w; - struct rb_node *n; - size_t start_pos = m->count; - size_t header_pos; - - seq_printf(m, "proc %d\n", proc->pid); - header_pos = m->count; - - for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) - print_binder_thread(m, rb_entry(n, struct binder_thread, - rb_node), print_all); - for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { - struct binder_node *node = rb_entry(n, struct binder_node, - rb_node); - if (print_all || node->has_async_transaction) - print_binder_node(m, node); - } - if (print_all) { - for (n = rb_first(&proc->refs_by_desc); - n != NULL; - n = rb_next(n)) - print_binder_ref(m, rb_entry(n, struct binder_ref, - rb_node_desc)); - } - for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) - print_binder_buffer(m, " buffer", - rb_entry(n, struct binder_buffer, rb_node)); - list_for_each_entry(w, &proc->todo, entry) - print_binder_work(m, " ", " pending transaction", w); - list_for_each_entry(w, &proc->delivered_death, entry) { - seq_puts(m, " has delivered dead binder\n"); - break; - } - if (!print_all && m->count == header_pos) - m->count = start_pos; -} - -static const char * const binder_return_strings[] = { - "BR_ERROR", - "BR_OK", - "BR_TRANSACTION", - "BR_REPLY", - "BR_ACQUIRE_RESULT", - "BR_DEAD_REPLY", - "BR_TRANSACTION_COMPLETE", - "BR_INCREFS", - "BR_ACQUIRE", - "BR_RELEASE", - "BR_DECREFS", - "BR_ATTEMPT_ACQUIRE", - "BR_NOOP", - "BR_SPAWN_LOOPER", - "BR_FINISHED", - "BR_DEAD_BINDER", - "BR_CLEAR_DEATH_NOTIFICATION_DONE", - "BR_FAILED_REPLY" -}; - -static const char * const binder_command_strings[] = { - "BC_TRANSACTION", - "BC_REPLY", - "BC_ACQUIRE_RESULT", - "BC_FREE_BUFFER", - "BC_INCREFS", - "BC_ACQUIRE", - "BC_RELEASE", - "BC_DECREFS", - "BC_INCREFS_DONE", - "BC_ACQUIRE_DONE", - "BC_ATTEMPT_ACQUIRE", - "BC_REGISTER_LOOPER", - "BC_ENTER_LOOPER", - "BC_EXIT_LOOPER", - "BC_REQUEST_DEATH_NOTIFICATION", - "BC_CLEAR_DEATH_NOTIFICATION", - "BC_DEAD_BINDER_DONE" -}; - -static const char * const binder_objstat_strings[] = { - "proc", - "thread", - "node", - "ref", - "death", - "transaction", - "transaction_complete" -}; - -static void print_binder_stats(struct seq_file *m, const char *prefix, - struct binder_stats *stats) -{ - int i; - - BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != - ARRAY_SIZE(binder_command_strings)); - for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { - if (stats->bc[i]) - seq_printf(m, "%s%s: %d\n", prefix, - binder_command_strings[i], stats->bc[i]); - } - - BUILD_BUG_ON(ARRAY_SIZE(stats->br) != - ARRAY_SIZE(binder_return_strings)); - for (i = 0; i < ARRAY_SIZE(stats->br); i++) { - if (stats->br[i]) - seq_printf(m, "%s%s: %d\n", prefix, - binder_return_strings[i], stats->br[i]); - } - - BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != - ARRAY_SIZE(binder_objstat_strings)); - BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != - ARRAY_SIZE(stats->obj_deleted)); - for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { - if (stats->obj_created[i] || stats->obj_deleted[i]) - seq_printf(m, "%s%s: active %d total %d\n", prefix, - binder_objstat_strings[i], - stats->obj_created[i] - stats->obj_deleted[i], - stats->obj_created[i]); - } -} - -static void print_binder_proc_stats(struct seq_file *m, - struct binder_proc *proc) -{ - struct binder_work *w; - struct rb_node *n; - int count, strong, weak; - - seq_printf(m, "proc %d\n", proc->pid); 
- count = 0; - for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) - count++; - seq_printf(m, " threads: %d\n", count); - seq_printf(m, " requested threads: %d+%d/%d\n" - " ready threads %d\n" - " free async space %zd\n", proc->requested_threads, - proc->requested_threads_started, proc->max_threads, - proc->ready_threads, proc->free_async_space); - count = 0; - for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) - count++; - seq_printf(m, " nodes: %d\n", count); - count = 0; - strong = 0; - weak = 0; - for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { - struct binder_ref *ref = rb_entry(n, struct binder_ref, - rb_node_desc); - count++; - strong += ref->strong; - weak += ref->weak; - } - seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); - - count = 0; - for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) - count++; - seq_printf(m, " buffers: %d\n", count); - - count = 0; - list_for_each_entry(w, &proc->todo, entry) { - switch (w->type) { - case BINDER_WORK_TRANSACTION: - count++; - break; - default: - break; - } - } - seq_printf(m, " pending transactions: %d\n", count); - - print_binder_stats(m, " ", &proc->stats); -} - - -static int binder_state_show(struct seq_file *m, void *unused) -{ - struct binder_proc *proc; - struct binder_node *node; - int do_lock = !binder_debug_no_lock; - - if (do_lock) - binder_lock(__func__); - - seq_puts(m, "binder state:\n"); - - if (!hlist_empty(&binder_dead_nodes)) - seq_puts(m, "dead nodes:\n"); - hlist_for_each_entry(node, &binder_dead_nodes, dead_node) - print_binder_node(m, node); - - hlist_for_each_entry(proc, &binder_procs, proc_node) - print_binder_proc(m, proc, 1); - if (do_lock) - binder_unlock(__func__); - return 0; -} - -static int binder_stats_show(struct seq_file *m, void *unused) -{ - struct binder_proc *proc; - int do_lock = !binder_debug_no_lock; - - if (do_lock) - binder_lock(__func__); - - seq_puts(m, "binder stats:\n"); - - print_binder_stats(m, "", &binder_stats); - - hlist_for_each_entry(proc, &binder_procs, proc_node) - print_binder_proc_stats(m, proc); - if (do_lock) - binder_unlock(__func__); - return 0; -} - -static int binder_transactions_show(struct seq_file *m, void *unused) -{ - struct binder_proc *proc; - int do_lock = !binder_debug_no_lock; - - if (do_lock) - binder_lock(__func__); - - seq_puts(m, "binder transactions:\n"); - hlist_for_each_entry(proc, &binder_procs, proc_node) - print_binder_proc(m, proc, 0); - if (do_lock) - binder_unlock(__func__); - return 0; -} - -static int binder_proc_show(struct seq_file *m, void *unused) -{ - struct binder_proc *proc = m->private; - int do_lock = !binder_debug_no_lock; - - if (do_lock) - binder_lock(__func__); - seq_puts(m, "binder proc state:\n"); - print_binder_proc(m, proc, 1); - if (do_lock) - binder_unlock(__func__); - return 0; -} - -static void print_binder_transaction_log_entry(struct seq_file *m, - struct binder_transaction_log_entry *e) -{ - seq_printf(m, - "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", - e->debug_id, (e->call_type == 2) ? "reply" : - ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, - e->from_thread, e->to_proc, e->to_thread, e->to_node, - e->target_handle, e->data_size, e->offsets_size); -} - -static int binder_transaction_log_show(struct seq_file *m, void *unused) -{ - struct binder_transaction_log *log = m->private; - int i; - - if (log->full) { - for (i = log->next; i < ARRAY_SIZE(log->entry); i++) - print_binder_transaction_log_entry(m, &log->entry[i]); - } - for (i = 0; i < log->next; i++) - print_binder_transaction_log_entry(m, &log->entry[i]); - return 0; -} - -static const struct file_operations binder_fops = { - .owner = THIS_MODULE, - .poll = binder_poll, - .unlocked_ioctl = binder_ioctl, - .mmap = binder_mmap, - .open = binder_open, - .flush = binder_flush, - .release = binder_release, -}; - -static struct miscdevice binder_miscdev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "binder", - .fops = &binder_fops -}; - -BINDER_DEBUG_ENTRY(state); -BINDER_DEBUG_ENTRY(stats); -BINDER_DEBUG_ENTRY(transactions); -BINDER_DEBUG_ENTRY(transaction_log); - -static int __init binder_init(void) -{ - int ret; - - binder_deferred_workqueue = create_singlethread_workqueue("binder"); - if (!binder_deferred_workqueue) - return -ENOMEM; - - binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); - if (binder_debugfs_dir_entry_root) - binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", - binder_debugfs_dir_entry_root); - ret = misc_register(&binder_miscdev); - if (binder_debugfs_dir_entry_root) { - debugfs_create_file("state", - S_IRUGO, - binder_debugfs_dir_entry_root, - NULL, - &binder_state_fops); - debugfs_create_file("stats", - S_IRUGO, - binder_debugfs_dir_entry_root, - NULL, - &binder_stats_fops); - debugfs_create_file("transactions", - S_IRUGO, - binder_debugfs_dir_entry_root, - NULL, - &binder_transactions_fops); - debugfs_create_file("transaction_log", - S_IRUGO, - binder_debugfs_dir_entry_root, - &binder_transaction_log, - &binder_transaction_log_fops); - debugfs_create_file("failed_transaction_log", - S_IRUGO, - binder_debugfs_dir_entry_root, - &binder_transaction_log_failed, - &binder_transaction_log_fops); - } - return ret; -} - -device_initcall(binder_init); - -#define CREATE_TRACE_POINTS -#include "binder_trace.h" - -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h deleted file mode 100644 index dbe81ceca1bd..000000000000 --- a/drivers/staging/android/binder.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2008 Google, Inc. - * - * Based on, but no longer compatible with, the original - * OpenBinder.org binder driver interface, which is: - * - * Copyright (c) 2005 Palmsource, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- *
- */
-
-#ifndef _LINUX_BINDER_H
-#define _LINUX_BINDER_H
-
-#include <linux/ioctl.h>
-
-#define B_PACK_CHARS(c1, c2, c3, c4) \
-	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
-	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
-	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
-	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
-	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
-	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
-	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
-	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes. The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur. The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
-	/* 8 bytes for large_flat_header. */
-	unsigned long		type;
-	unsigned long		flags;
-
-	/* 8 bytes of data. */
-	union {
-		void __user	*binder;	/* local object */
-		signed long	handle;		/* remote object */
-	};
-
-	/* extra data associated with local object */
-	void __user		*cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
-	signed long	write_size;	/* bytes to write */
-	signed long	write_consumed;	/* bytes consumed by driver */
-	unsigned long	write_buffer;
-	signed long	read_size;	/* bytes to read */
-	signed long	read_consumed;	/* bytes consumed by driver */
-	unsigned long	read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
-	/* driver protocol version -- increment with incompatible change */
-	signed long	protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS		_IOW('b', 5, size_t)
-#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
-#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interrupted. This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process. That is, the process is being destroyed.
- * You should handle this by exiting from your process. Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
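The retry rule above is easy to get wrong in clients, so here is a minimal userspace sketch of the canonical loop (the fd and bwr variables are placeholders, not part of this header):

	#include <errno.h>
	#include <sys/ioctl.h>

	/* Issue BINDER_WRITE_READ, retrying while the driver reports EINTR.
	 * Any other error (notably ECONNREFUSED, meaning the process is
	 * being torn down) is returned to the caller. */
	static int binder_do_write_read(int fd, struct binder_write_read *bwr)
	{
		int ret;

		do {
			ret = ioctl(fd, BINDER_WRITE_READ, bwr);
		} while (ret < 0 && errno == EINTR);

		return ret;
	}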
-enum transaction_flags {
-	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
-	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
-	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
-	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
-	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
-	 * identifying the target and contents of the transaction.
-	 */
-	union {
-		size_t	handle;	/* target descriptor of command transaction */
-		void	*ptr;	/* target descriptor of return transaction */
-	} target;
-	void		*cookie;	/* target object cookie */
-	unsigned int	code;		/* transaction command */
-
-	/* General information about the transaction. */
-	unsigned int	flags;
-	pid_t		sender_pid;
-	uid_t		sender_euid;
-	size_t		data_size;	/* number of bytes of data */
-	size_t		offsets_size;	/* number of bytes of offsets */
-
-	/* If this transaction is inline, the data immediately
-	 * follows here; otherwise, it ends with a pointer to
-	 * the data buffer.
-	 */
-	union {
-		struct {
-			/* transaction data */
-			const void __user	*buffer;
-			/* offsets from buffer to flat_binder_object structs */
-			const void __user	*offsets;
-		} ptr;
-		uint8_t	buf[8];
-	} data;
-};
-
-struct binder_ptr_cookie {
-	void *ptr;
-	void *cookie;
-};
-
-struct binder_pri_desc {
-	int priority;
-	int desc;
-};
-
-struct binder_pri_ptr_cookie {
-	int priority;
-	void *ptr;
-	void *cookie;
-};
-
-enum binder_driver_return_protocol {
-	BR_ERROR = _IOR('r', 0, int),
-	/*
-	 * int: error code
-	 */
-
-	BR_OK = _IO('r', 1),
-	/* No parameters! */
-
-	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
-	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
-	/*
-	 * binder_transaction_data: the received command.
-	 */
-
-	BR_ACQUIRE_RESULT = _IOR('r', 4, int),
-	/*
-	 * not currently supported
-	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
-	 * Else the remote object has acquired a primary reference.
-	 */
-
-	BR_DEAD_REPLY = _IO('r', 5),
-	/*
-	 * The target of the last transaction (either a bcTRANSACTION or
-	 * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
-	 */
-
-	BR_TRANSACTION_COMPLETE = _IO('r', 6),
-	/*
-	 * No parameters... always refers to the last transaction requested
-	 * (including replies). Note that this will be sent even for
-	 * asynchronous transactions.
-	 */
-
-	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
-	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
-	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
-	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
-	/*
-	 * void *:	ptr to binder
-	 * void *:	cookie for binder
-	 */
-
-	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
-	/*
-	 * not currently supported
-	 * int:	priority
-	 * void *: ptr to binder
-	 * void *: cookie for binder
-	 */
-
-	BR_NOOP = _IO('r', 12),
-	/*
-	 * No parameters. Do nothing and examine the next command. It exists
-	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
-	 */
-
-	BR_SPAWN_LOOPER = _IO('r', 13),
-	/*
-	 * No parameters. The driver has determined that a process has no
-	 * threads waiting to service incoming transactions. When a process
-	 * receives this command, it must spawn a new service thread and
-	 * register it via bcENTER_LOOPER.
-	 */
-
-	BR_FINISHED = _IO('r', 14),
-	/*
-	 * not currently supported
-	 * stop threadpool thread
-	 */
-
-	BR_DEAD_BINDER = _IOR('r', 15, void *),
-	/*
-	 * void *: cookie
-	 */
-	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
-	/*
-	 * void *: cookie
-	 */
-
-	BR_FAILED_REPLY = _IO('r', 17),
-	/*
-	 * The last transaction (either a bcTRANSACTION or
-	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
-	 */
-};
-
-enum binder_driver_command_protocol {
-	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
-	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
-	/*
-	 * binder_transaction_data: the sent command.
- */ - - BC_ACQUIRE_RESULT = _IOW('c', 2, int), - /* - * not currently supported - * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. - * Else you have acquired a primary reference on the object. - */ - - BC_FREE_BUFFER = _IOW('c', 3, int), - /* - * void *: ptr to transaction data received on a read - */ - - BC_INCREFS = _IOW('c', 4, int), - BC_ACQUIRE = _IOW('c', 5, int), - BC_RELEASE = _IOW('c', 6, int), - BC_DECREFS = _IOW('c', 7, int), - /* - * int: descriptor - */ - - BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), - BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), - /* - * void *: ptr to binder - * void *: cookie for binder - */ - - BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), - /* - * not currently supported - * int: priority - * int: descriptor - */ - - BC_REGISTER_LOOPER = _IO('c', 11), - /* - * No parameters. - * Register a spawned looper thread with the device. - */ - - BC_ENTER_LOOPER = _IO('c', 12), - BC_EXIT_LOOPER = _IO('c', 13), - /* - * No parameters. - * These two commands are sent as an application-level thread - * enters and exits the binder loop, respectively. They are - * used so the binder can have an accurate count of the number - * of looping threads it has available. - */ - - BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie), - /* - * void *: ptr to binder - * void *: cookie - */ - - BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie), - /* - * void *: ptr to binder - * void *: cookie - */ - - BC_DEAD_BINDER_DONE = _IOW('c', 16, void *), - /* - * void *: cookie - */ -}; - -#endif /* _LINUX_BINDER_H */ - diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h deleted file mode 100644 index 82a567c2af67..000000000000 --- a/drivers/staging/android/binder_trace.h +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Copyright (C) 2012 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM binder - -#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _BINDER_TRACE_H - -#include <linux/tracepoint.h> - -struct binder_buffer; -struct binder_node; -struct binder_proc; -struct binder_ref; -struct binder_thread; -struct binder_transaction; - -TRACE_EVENT(binder_ioctl, - TP_PROTO(unsigned int cmd, unsigned long arg), - TP_ARGS(cmd, arg), - - TP_STRUCT__entry( - __field(unsigned int, cmd) - __field(unsigned long, arg) - ), - TP_fast_assign( - __entry->cmd = cmd; - __entry->arg = arg; - ), - TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg) -); - -DECLARE_EVENT_CLASS(binder_lock_class, - TP_PROTO(const char *tag), - TP_ARGS(tag), - TP_STRUCT__entry( - __field(const char *, tag) - ), - TP_fast_assign( - __entry->tag = tag; - ), - TP_printk("tag=%s", __entry->tag) -); - -#define DEFINE_BINDER_LOCK_EVENT(name) \ -DEFINE_EVENT(binder_lock_class, name, \ - TP_PROTO(const char *func), \ - TP_ARGS(func)) - -DEFINE_BINDER_LOCK_EVENT(binder_lock); -DEFINE_BINDER_LOCK_EVENT(binder_locked); -DEFINE_BINDER_LOCK_EVENT(binder_unlock); - -DECLARE_EVENT_CLASS(binder_function_return_class, - TP_PROTO(int ret), - TP_ARGS(ret), - TP_STRUCT__entry( - __field(int, ret) - ), - TP_fast_assign( - __entry->ret = ret; - ), - TP_printk("ret=%d", __entry->ret) -); - -#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name) \ -DEFINE_EVENT(binder_function_return_class, name, \ - TP_PROTO(int ret), \ - TP_ARGS(ret)) - -DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); -DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); -DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); - -TRACE_EVENT(binder_wait_for_work, - TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), - TP_ARGS(proc_work, transaction_stack, thread_todo), - - TP_STRUCT__entry( - __field(bool, proc_work) - __field(bool, transaction_stack) - __field(bool, thread_todo) - ), - TP_fast_assign( - __entry->proc_work = proc_work; - __entry->transaction_stack = transaction_stack; - __entry->thread_todo = thread_todo; - ), - TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d", - __entry->proc_work, __entry->transaction_stack, - __entry->thread_todo) -); - -TRACE_EVENT(binder_transaction, - TP_PROTO(bool reply, struct binder_transaction *t, - struct binder_node *target_node), - TP_ARGS(reply, t, target_node), - TP_STRUCT__entry( - __field(int, debug_id) - __field(int, target_node) - __field(int, to_proc) - __field(int, to_thread) - __field(int, reply) - __field(unsigned int, code) - __field(unsigned int, flags) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - __entry->target_node = target_node ? target_node->debug_id : 0; - __entry->to_proc = t->to_proc->pid; - __entry->to_thread = t->to_thread ? 
t->to_thread->pid : 0; - __entry->reply = reply; - __entry->code = t->code; - __entry->flags = t->flags; - ), - TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x", - __entry->debug_id, __entry->target_node, - __entry->to_proc, __entry->to_thread, - __entry->reply, __entry->flags, __entry->code) -); - -TRACE_EVENT(binder_transaction_received, - TP_PROTO(struct binder_transaction *t), - TP_ARGS(t), - - TP_STRUCT__entry( - __field(int, debug_id) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - ), - TP_printk("transaction=%d", __entry->debug_id) -); - -TRACE_EVENT(binder_transaction_node_to_ref, - TP_PROTO(struct binder_transaction *t, struct binder_node *node, - struct binder_ref *ref), - TP_ARGS(t, node, ref), - - TP_STRUCT__entry( - __field(int, debug_id) - __field(int, node_debug_id) - __field(void __user *, node_ptr) - __field(int, ref_debug_id) - __field(uint32_t, ref_desc) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - __entry->node_debug_id = node->debug_id; - __entry->node_ptr = node->ptr; - __entry->ref_debug_id = ref->debug_id; - __entry->ref_desc = ref->desc; - ), - TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d", - __entry->debug_id, __entry->node_debug_id, __entry->node_ptr, - __entry->ref_debug_id, __entry->ref_desc) -); - -TRACE_EVENT(binder_transaction_ref_to_node, - TP_PROTO(struct binder_transaction *t, struct binder_ref *ref), - TP_ARGS(t, ref), - - TP_STRUCT__entry( - __field(int, debug_id) - __field(int, ref_debug_id) - __field(uint32_t, ref_desc) - __field(int, node_debug_id) - __field(void __user *, node_ptr) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - __entry->ref_debug_id = ref->debug_id; - __entry->ref_desc = ref->desc; - __entry->node_debug_id = ref->node->debug_id; - __entry->node_ptr = ref->node->ptr; - ), - TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p", - __entry->debug_id, __entry->node_debug_id, - __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr) -); - -TRACE_EVENT(binder_transaction_ref_to_ref, - TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref, - struct binder_ref *dest_ref), - TP_ARGS(t, src_ref, dest_ref), - - TP_STRUCT__entry( - __field(int, debug_id) - __field(int, node_debug_id) - __field(int, src_ref_debug_id) - __field(uint32_t, src_ref_desc) - __field(int, dest_ref_debug_id) - __field(uint32_t, dest_ref_desc) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - __entry->node_debug_id = src_ref->node->debug_id; - __entry->src_ref_debug_id = src_ref->debug_id; - __entry->src_ref_desc = src_ref->desc; - __entry->dest_ref_debug_id = dest_ref->debug_id; - __entry->dest_ref_desc = dest_ref->desc; - ), - TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d", - __entry->debug_id, __entry->node_debug_id, - __entry->src_ref_debug_id, __entry->src_ref_desc, - __entry->dest_ref_debug_id, __entry->dest_ref_desc) -); - -TRACE_EVENT(binder_transaction_fd, - TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), - TP_ARGS(t, src_fd, dest_fd), - - TP_STRUCT__entry( - __field(int, debug_id) - __field(int, src_fd) - __field(int, dest_fd) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - __entry->src_fd = src_fd; - __entry->dest_fd = dest_fd; - ), - TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", - __entry->debug_id, __entry->src_fd, __entry->dest_fd) -); - -DECLARE_EVENT_CLASS(binder_buffer_class, - TP_PROTO(struct binder_buffer *buf), 
-	TP_ARGS(buf),
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-		__field(size_t, data_size)
-		__field(size_t, offsets_size)
-	),
-	TP_fast_assign(
-		__entry->debug_id = buf->debug_id;
-		__entry->data_size = buf->data_size;
-		__entry->offsets_size = buf->offsets_size;
-	),
-	TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
-		  __entry->debug_id, __entry->data_size, __entry->offsets_size)
-);
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
-	TP_PROTO(struct binder_buffer *buffer),
-	TP_ARGS(buffer));
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release,
-	TP_PROTO(struct binder_buffer *buffer),
-	TP_ARGS(buffer));
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
-	TP_PROTO(struct binder_buffer *buffer),
-	TP_ARGS(buffer));
-
-TRACE_EVENT(binder_update_page_range,
-	TP_PROTO(struct binder_proc *proc, bool allocate,
-		 void *start, void *end),
-	TP_ARGS(proc, allocate, start, end),
-	TP_STRUCT__entry(
-		__field(int, proc)
-		__field(bool, allocate)
-		__field(size_t, offset)
-		__field(size_t, size)
-	),
-	TP_fast_assign(
-		__entry->proc = proc->pid;
-		__entry->allocate = allocate;
-		__entry->offset = start - proc->buffer;
-		__entry->size = end - start;
-	),
-	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
-		  __entry->proc, __entry->allocate,
-		  __entry->offset, __entry->size)
-);
-
-TRACE_EVENT(binder_command,
-	TP_PROTO(uint32_t cmd),
-	TP_ARGS(cmd),
-	TP_STRUCT__entry(
-		__field(uint32_t, cmd)
-	),
-	TP_fast_assign(
-		__entry->cmd = cmd;
-	),
-	TP_printk("cmd=0x%x %s",
-		  __entry->cmd,
-		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ?
-			  binder_command_strings[_IOC_NR(__entry->cmd)] :
-			  "unknown")
-);
-
-TRACE_EVENT(binder_return,
-	TP_PROTO(uint32_t cmd),
-	TP_ARGS(cmd),
-	TP_STRUCT__entry(
-		__field(uint32_t, cmd)
-	),
-	TP_fast_assign(
-		__entry->cmd = cmd;
-	),
-	TP_printk("cmd=0x%x %s",
-		  __entry->cmd,
-		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ?
-			  binder_return_strings[_IOC_NR(__entry->cmd)] :
-			  "unknown")
-);
-
-#endif /* _BINDER_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE binder_trace
-#include <trace/define_trace.h>
diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig
new file mode 100644
index 000000000000..56f7f999377e
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Kconfig
@@ -0,0 +1,49 @@
+config FIQ_DEBUGGER
+	bool "FIQ Mode Serial Debugger"
+	default n
+	depends on ARM || ARM64
+	help
+	  The FIQ serial debugger can accept commands even when the
+	  kernel is unresponsive due to being stuck with interrupts
+	  disabled.
+
+config FIQ_DEBUGGER_NO_SLEEP
+	bool "Keep serial debugger active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables the serial debugger at boot. Passing
+	  fiq_debugger.no_sleep on the kernel commandline will
+	  override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+	bool "Don't disable wakeup IRQ when debugger is active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Don't disable the wakeup irq when enabling the uart clock. This will
+	  cause extra interrupts, but it makes the serial debugger usable on
+	  some MSM radio builds that ignore the uart clock request in power
+	  collapse.
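For example, assuming the module parameters defined later in fiq_debugger.c and the ttyFIQ console name it registers, a (hypothetical) boot command line that keeps the debugger awake and routes the console to it would look like:

	console=ttyFIQ0 fiq_debugger.no_sleep=1 fiq_debugger.console_enable=1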
+
+config FIQ_DEBUGGER_CONSOLE
+	bool "Console on FIQ Serial Debugger port"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables a console so that printk messages are displayed on
+	  the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+	bool "Put the FIQ debugger into console mode by default"
+	depends on FIQ_DEBUGGER_CONSOLE
+	default n
+	help
+	  If enabled, this puts the fiq debugger into console mode by default.
+	  Otherwise, the fiq debugger will start out in debug mode.
+
+config FIQ_WATCHDOG
+	bool
+	select FIQ_DEBUGGER
+	select PSTORE_RAM
+	default n
diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile
new file mode 100644
index 000000000000..a7ca4871cad3
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Makefile
@@ -0,0 +1,4 @@
+obj-y += fiq_debugger.o
+obj-$(CONFIG_ARM) += fiq_debugger_arm.o
+obj-$(CONFIG_ARM64) += fiq_debugger_arm64.o
+obj-$(CONFIG_FIQ_WATCHDOG) += fiq_watchdog.o
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
new file mode 100644
index 000000000000..1d733624d70a
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -0,0 +1,1213 @@
+/*
+ * drivers/staging/android/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#include <stdarg.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/console.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/kernel_stat.h> +#include <linux/kmsg_dump.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/reboot.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/timer.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/wakelock.h> + +#ifdef CONFIG_FIQ_GLUE +#include <asm/fiq_glue.h> +#endif + +#include <linux/uaccess.h> + +#include "fiq_debugger.h" +#include "fiq_debugger_priv.h" +#include "fiq_debugger_ringbuf.h" + +#define DEBUG_MAX 64 +#define MAX_UNHANDLED_FIQ_COUNT 1000000 + +#define MAX_FIQ_DEBUGGER_PORTS 4 + +struct fiq_debugger_state { +#ifdef CONFIG_FIQ_GLUE + struct fiq_glue_handler handler; +#endif + struct fiq_debugger_output output; + + int fiq; + int uart_irq; + int signal_irq; + int wakeup_irq; + bool wakeup_irq_no_set_wake; + struct clk *clk; + struct fiq_debugger_pdata *pdata; + struct platform_device *pdev; + + char debug_cmd[DEBUG_MAX]; + int debug_busy; + int debug_abort; + + char debug_buf[DEBUG_MAX]; + int debug_count; + + bool no_sleep; + bool debug_enable; + bool ignore_next_wakeup_irq; + struct timer_list sleep_timer; + spinlock_t sleep_timer_lock; + bool uart_enabled; + struct wake_lock debugger_wake_lock; + bool console_enable; + int current_cpu; + atomic_t unhandled_fiq_count; + bool in_fiq; + + struct work_struct work; + spinlock_t work_lock; + char work_cmd[DEBUG_MAX]; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + spinlock_t console_lock; + struct console console; + struct tty_port tty_port; + struct fiq_debugger_ringbuf *tty_rbuf; + bool syslog_dumping; +#endif + + unsigned int last_irqs[NR_IRQS]; + unsigned int last_local_timer_irqs[NR_CPUS]; +}; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE +struct tty_driver *fiq_tty_driver; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP +static bool initial_no_sleep = true; +#else +static bool initial_no_sleep; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE +static bool initial_debug_enable = true; +static bool initial_console_enable = true; +#else +static bool initial_debug_enable; +static bool initial_console_enable; +#endif + +static bool fiq_kgdb_enable; + +module_param_named(no_sleep, initial_no_sleep, bool, 0644); +module_param_named(debug_enable, initial_debug_enable, bool, 0644); +module_param_named(console_enable, initial_console_enable, bool, 0644); +module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644); + +#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON +static inline +void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {} +static inline +void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {} +#else +static inline +void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + enable_irq(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + enable_irq_wake(state->wakeup_irq); +} +static inline +void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + disable_irq_nosync(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + disable_irq_wake(state->wakeup_irq); +} +#endif + +static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state) +{ + return (state->fiq >= 0); +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_force_irq(struct fiq_debugger_state 
*state) +{ + unsigned int irq = state->signal_irq; + + if (WARN_ON(!fiq_debugger_have_fiq(state))) + return; + if (state->pdata->force_irq) { + state->pdata->force_irq(state->pdev, irq); + } else { + struct irq_chip *chip = irq_get_chip(irq); + if (chip && chip->irq_retrigger) + chip->irq_retrigger(irq_get_irq_data(irq)); + } +} +#endif + +static void fiq_debugger_uart_enable(struct fiq_debugger_state *state) +{ + if (state->clk) + clk_enable(state->clk); + if (state->pdata->uart_enable) + state->pdata->uart_enable(state->pdev); +} + +static void fiq_debugger_uart_disable(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_disable) + state->pdata->uart_disable(state->pdev); + if (state->clk) + clk_disable(state->clk); +} + +static void fiq_debugger_uart_flush(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_flush) + state->pdata->uart_flush(state->pdev); +} + +static void fiq_debugger_putc(struct fiq_debugger_state *state, char c) +{ + state->pdata->uart_putc(state->pdev, c); +} + +static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s) +{ + unsigned c; + while ((c = *s++)) { + if (c == '\n') + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, c); + } +} + +static void fiq_debugger_prompt(struct fiq_debugger_state *state) +{ + fiq_debugger_puts(state, "debug> "); +} + +static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state) +{ + char buf[512]; + size_t len; + struct kmsg_dumper dumper = { .active = true }; + + + kmsg_dump_rewind_nolock(&dumper); + while (kmsg_dump_get_line_nolock(&dumper, true, buf, + sizeof(buf) - 1, &len)) { + buf[len] = 0; + fiq_debugger_puts(state, buf); + } +} + +static void fiq_debugger_printf(struct fiq_debugger_output *output, + const char *fmt, ...) +{ + struct fiq_debugger_state *state; + char buf[256]; + va_list ap; + + state = container_of(output, struct fiq_debugger_state, output); + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + fiq_debugger_puts(state, buf); +} + +/* Safe outside fiq context */ +static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...) +{ + struct fiq_debugger_state *state = cookie; + char buf[256]; + va_list ap; + unsigned long irq_flags; + + va_start(ap, fmt); + vsnprintf(buf, 128, fmt, ap); + va_end(ap); + + local_irq_save(irq_flags); + fiq_debugger_puts(state, buf); + fiq_debugger_uart_flush(state); + local_irq_restore(irq_flags); + return state->debug_abort; +} + +static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state) +{ + int n; + struct irq_desc *desc; + + fiq_debugger_printf(&state->output, + "irqnr total since-last status name\n"); + for_each_irq_desc(n, desc) { + struct irqaction *act = desc->action; + if (!act && !kstat_irqs(n)) + continue; + fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x %s\n", n, + kstat_irqs(n), + kstat_irqs(n) - state->last_irqs[n], + desc->status_use_accessors, + (act && act->name) ? act->name : "???"); + state->last_irqs[n] = kstat_irqs(n); + } +} + +static void fiq_debugger_do_ps(struct fiq_debugger_state *state) +{ + struct task_struct *g; + struct task_struct *p; + unsigned task_state; + static const char stat_nam[] = "RSDTtZX"; + + fiq_debugger_printf(&state->output, "pid ppid prio task pc\n"); + read_lock(&tasklist_lock); + do_each_thread(g, p) { + task_state = p->state ? 
__ffs(p->state) + 1 : 0;
+		fiq_debugger_printf(&state->output,
+			     "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+		fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm,
+			     task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+		if (task_state == TASK_RUNNING)
+			fiq_debugger_printf(&state->output, " running\n");
+		else
+			fiq_debugger_printf(&state->output, " %08lx\n",
+					thread_saved_pc(p));
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+	state->syslog_dumping = true;
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+	state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+	do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+	fiq_debugger_dump_kernel_log(state);
+}
+#endif
+
+static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+	if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+		fiq_debugger_printf(&state->output, "sysrq-g blocked\n");
+		return;
+	}
+	fiq_debugger_begin_syslog_dump(state);
+	handle_sysrq(rq);
+	fiq_debugger_end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state)
+{
+	if (!fiq_kgdb_enable) {
+		fiq_debugger_printf(&state->output,
+			"kgdb through fiq debugger not enabled\n");
+		return;
+	}
+
+	fiq_debugger_printf(&state->output,
+		"enabling console and triggering kgdb\n");
+	state->console_enable = true;
+	handle_sysrq('g');
+}
+#endif
+
+static void fiq_debugger_schedule_work(struct fiq_debugger_state *state,
+		char *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state->work_lock, flags);
+	if (state->work_cmd[0] != '\0') {
+		fiq_debugger_printf(&state->output, "work command processor busy\n");
+		spin_unlock_irqrestore(&state->work_lock, flags);
+		return;
+	}
+
+	strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+	spin_unlock_irqrestore(&state->work_lock, flags);
+
+	schedule_work(&state->work);
+}
+
+static void fiq_debugger_work(struct work_struct *work)
+{
+	struct fiq_debugger_state *state;
+	char work_cmd[DEBUG_MAX];
+	char *cmd;
+	unsigned long flags;
+
+	state = container_of(work, struct fiq_debugger_state, work);
+
+	spin_lock_irqsave(&state->work_lock, flags);
+
+	strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+	state->work_cmd[0] = '\0';
+
+	spin_unlock_irqrestore(&state->work_lock, flags);
+
+	cmd = work_cmd;
+	if (!strncmp(cmd, "reboot", 6)) {
+		cmd += 6;
+		while (*cmd == ' ')
+			cmd++;
+		if (*cmd != '\0')
+			kernel_restart(cmd);
+		else
+			kernel_restart(NULL);
+	} else {
+		fiq_debugger_printf(&state->output, "unknown work command '%s'\n",
+				work_cmd);
+	}
+}
+
+/* This function CANNOT be called in FIQ context */
+static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+	if (!strcmp(cmd, "ps"))
+		fiq_debugger_do_ps(state);
+	if (!strcmp(cmd, "sysrq"))
+		fiq_debugger_do_sysrq(state, 'h');
+	if (!strncmp(cmd, "sysrq ", 6))
+		fiq_debugger_do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+	if (!strcmp(cmd, "kgdb"))
+		fiq_debugger_do_kgdb(state);
+#endif
+	if (!strncmp(cmd, "reboot", 6))
+		fiq_debugger_schedule_work(state, cmd);
+}
+
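The strcmp/strncmp chain in fiq_debugger_irq_exec() above is the natural extension point for commands that must not run in FIQ context. A hypothetical extra command (not part of this patch) would follow the same pattern:

	/* sketch only: "uptime" is an invented command name */
	if (!strcmp(cmd, "uptime"))
		fiq_debugger_printf(&state->output, "jiffies: %lu\n", jiffies);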
+static void fiq_debugger_help(struct fiq_debugger_state *state)
+{
+	fiq_debugger_printf(&state->output,
+				"FIQ Debugger commands:\n"
+				" pc            PC status\n"
+				" regs          Register dump\n"
+				" allregs       Extended Register dump\n"
+				" bt            Stack trace\n");
+	fiq_debugger_printf(&state->output,
+				" reboot [<c>]  Reboot with command <c>\n"
+				" reset [<c>]   Hard reset with command <c>\n"
+				" irqs          Interrupt status\n"
+				" kmsg          Kernel log\n"
+				" version       Kernel version\n");
+	fiq_debugger_printf(&state->output,
+				" sleep         Allow sleep while in FIQ\n"
+				" nosleep       Disable sleep while in FIQ\n"
+				" console       Switch terminal to console\n"
+				" cpu           Current CPU\n"
+				" cpu <number>  Switch to CPU<number>\n");
+	fiq_debugger_printf(&state->output,
+				" ps            Process list\n"
+				" sysrq         sysrq options\n"
+				" sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+	fiq_debugger_printf(&state->output,
+				" kgdb          Enter kernel debugger\n");
+#endif
+}
+
+static void fiq_debugger_take_affinity(void *info)
+{
+	struct fiq_debugger_state *state = info;
+	struct cpumask cpumask;
+
+	cpumask_clear(&cpumask);
+	cpumask_set_cpu(get_cpu(), &cpumask);
+
+	irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+	if (!fiq_debugger_have_fiq(state))
+		smp_call_function_single(cpu, fiq_debugger_take_affinity, state,
+				false);
+	state->current_cpu = cpu;
+}
+
+static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state,
+			const char *cmd, const struct pt_regs *regs,
+			void *svc_sp)
+{
+	bool signal_helper = false;
+
+	if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+		fiq_debugger_help(state);
+	} else if (!strcmp(cmd, "pc")) {
+		fiq_debugger_dump_pc(&state->output, regs);
+	} else if (!strcmp(cmd, "regs")) {
+		fiq_debugger_dump_regs(&state->output, regs);
+	} else if (!strcmp(cmd, "allregs")) {
+		fiq_debugger_dump_allregs(&state->output, regs);
+	} else if (!strcmp(cmd, "bt")) {
+		fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp);
+	} else if (!strncmp(cmd, "reset", 5)) {
+		cmd += 5;
+		while (*cmd == ' ')
+			cmd++;
+		if (*cmd) {
+			char tmp_cmd[32];
+			strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+			machine_restart(tmp_cmd);
+		} else {
+			machine_restart(NULL);
+		}
+	} else if (!strcmp(cmd, "irqs")) {
+		fiq_debugger_dump_irqs(state);
+	} else if (!strcmp(cmd, "kmsg")) {
+		fiq_debugger_dump_kernel_log(state);
+	} else if (!strcmp(cmd, "version")) {
+		fiq_debugger_printf(&state->output, "%s\n", linux_banner);
+	} else if (!strcmp(cmd, "sleep")) {
+		state->no_sleep = false;
+		fiq_debugger_printf(&state->output, "enabling sleep\n");
+	} else if (!strcmp(cmd, "nosleep")) {
+		state->no_sleep = true;
+		fiq_debugger_printf(&state->output, "disabling sleep\n");
+	} else if (!strcmp(cmd, "console")) {
+		fiq_debugger_printf(&state->output, "console mode\n");
+		fiq_debugger_uart_flush(state);
+		state->console_enable = true;
+	} else if (!strcmp(cmd, "cpu")) {
+		fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+	} else if (!strncmp(cmd, "cpu ", 4)) {
+		unsigned long cpu = 0;
+		if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+			fiq_debugger_switch_cpu(state, cpu);
+		else
+			fiq_debugger_printf(&state->output, "invalid cpu\n");
+		fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+	} else {
+		if (state->debug_busy) {
+			fiq_debugger_printf(&state->output,
+				"command processor busy. 
trying to abort.\n"); + state->debug_abort = -1; + } else { + strcpy(state->debug_cmd, cmd); + state->debug_busy = 1; + } + + return true; + } + if (!state->console_enable) + fiq_debugger_prompt(state); + + return signal_helper; +} + +static void fiq_debugger_sleep_timer_expired(unsigned long data) +{ + struct fiq_debugger_state *state = (struct fiq_debugger_state *)data; + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->uart_enabled && !state->no_sleep) { + if (state->debug_enable && !state->console_enable) { + state->debug_enable = false; + fiq_debugger_printf_nfiq(state, + "suspending fiq debugger\n"); + } + state->ignore_next_wakeup_irq = true; + fiq_debugger_uart_disable(state); + state->uart_enabled = false; + fiq_debugger_enable_wakeup_irq(state); + } + wake_unlock(&state->debugger_wake_lock); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state) +{ + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) { + state->ignore_next_wakeup_irq = false; + } else if (!state->uart_enabled) { + wake_lock(&state->debugger_wake_lock); + fiq_debugger_uart_enable(state); + state->uart_enabled = true; + fiq_debugger_disable_wakeup_irq(state); + mod_timer(&state->sleep_timer, jiffies + HZ / 2); + } + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (!state->no_sleep) + fiq_debugger_puts(state, "WAKEUP\n"); + fiq_debugger_handle_wakeup(state); + + return IRQ_HANDLED; +} + +static +void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state) +{ +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + if (state->tty_port.ops) { + int i; + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + for (i = 0; i < count; i++) { + int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL); + if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1)) + pr_warn("fiq tty failed to consume byte\n"); + } + tty_flip_buffer_push(&state->tty_port); + } +#endif +} + +static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state) +{ + if (!state->no_sleep) { + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + wake_lock(&state->debugger_wake_lock); + mod_timer(&state->sleep_timer, jiffies + HZ * 5); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); + } + fiq_debugger_handle_console_irq_context(state); + if (state->debug_busy) { + fiq_debugger_irq_exec(state, state->debug_cmd); + if (!state->console_enable) + fiq_debugger_prompt(state); + state->debug_busy = 0; + } +} + +static int fiq_debugger_getc(struct fiq_debugger_state *state) +{ + return state->pdata->uart_getc(state->pdev); +} + +static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state, + int this_cpu, const struct pt_regs *regs, void *svc_sp) +{ + int c; + static int last_c; + int count = 0; + bool signal_helper = false; + + if (this_cpu != state->current_cpu) { + if (state->in_fiq) + return false; + + if (atomic_inc_return(&state->unhandled_fiq_count) != + MAX_UNHANDLED_FIQ_COUNT) + return false; + + fiq_debugger_printf(&state->output, + "fiq_debugger: cpu %d not responding, " + "reverting to cpu %d\n", state->current_cpu, + this_cpu); + + atomic_set(&state->unhandled_fiq_count, 0); + 
fiq_debugger_switch_cpu(state, this_cpu); + return false; + } + + state->in_fiq = true; + + while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) { + count++; + if (!state->debug_enable) { + if ((c == 13) || (c == 10)) { + state->debug_enable = true; + state->debug_count = 0; + fiq_debugger_prompt(state); + } + } else if (c == FIQ_DEBUGGER_BREAK) { + state->console_enable = false; + fiq_debugger_puts(state, "fiq debugger mode\n"); + state->debug_count = 0; + fiq_debugger_prompt(state); +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + } else if (state->console_enable && state->tty_rbuf) { + fiq_debugger_ringbuf_push(state->tty_rbuf, c); + signal_helper = true; +#endif + } else if ((c >= ' ') && (c < 127)) { + if (state->debug_count < (DEBUG_MAX - 1)) { + state->debug_buf[state->debug_count++] = c; + fiq_debugger_putc(state, c); + } + } else if ((c == 8) || (c == 127)) { + if (state->debug_count > 0) { + state->debug_count--; + fiq_debugger_putc(state, 8); + fiq_debugger_putc(state, ' '); + fiq_debugger_putc(state, 8); + } + } else if ((c == 13) || (c == 10)) { + if (c == '\r' || (c == '\n' && last_c != '\r')) { + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, '\n'); + } + if (state->debug_count) { + state->debug_buf[state->debug_count] = 0; + state->debug_count = 0; + signal_helper |= + fiq_debugger_fiq_exec(state, + state->debug_buf, + regs, svc_sp); + } else { + fiq_debugger_prompt(state); + } + } + last_c = c; + } + if (!state->console_enable) + fiq_debugger_uart_flush(state); + if (state->pdata->fiq_ack) + state->pdata->fiq_ack(state->pdev, state->fiq); + + /* poke sleep timer if necessary */ + if (state->debug_enable && !state->no_sleep) + signal_helper = true; + + atomic_set(&state->unhandled_fiq_count, 0); + state->in_fiq = false; + + return signal_helper; +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_fiq(struct fiq_glue_handler *h, + const struct pt_regs *regs, void *svc_sp) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu; + bool need_irq; + + need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs, + svc_sp); + if (need_irq) + fiq_debugger_force_irq(state); +} +#endif + +/* + * When not using FIQs, we only use this single interrupt as an entry point. + * This just effectively takes over the UART interrupt and does all the work + * in this context. + */ +static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + bool not_done; + + fiq_debugger_handle_wakeup(state); + + /* handle the debugger irq in regular context */ + not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(), + get_irq_regs(), + current_thread_info()); + if (not_done) + fiq_debugger_handle_irq_context(state); + + return IRQ_HANDLED; +} + +/* + * If FIQs are used, not everything can happen in fiq context. + * FIQ handler does what it can and then signals this interrupt to finish the + * job in irq context. 
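+ *
+ * Roughly: the FIQ handler calls fiq_debugger_handle_uart_interrupt(),
+ * which returns true when irq-context work is pending; the handler then
+ * calls fiq_debugger_force_irq() to retrigger signal_irq, and the
+ * fiq_debugger_signal_irq() handler below finishes the work via
+ * fiq_debugger_handle_irq_context().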
+ */ +static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (state->pdata->force_irq_ack) + state->pdata->force_irq_ack(state->pdev, state->signal_irq); + + fiq_debugger_handle_irq_context(state); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_resume(struct fiq_glue_handler *h) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + if (state->pdata->uart_resume) + state->pdata->uart_resume(state->pdev); +} +#endif + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) +struct tty_driver *fiq_debugger_console_device(struct console *co, int *index) +{ + *index = co->index; + return fiq_tty_driver; +} + +static void fiq_debugger_console_write(struct console *co, + const char *s, unsigned int count) +{ + struct fiq_debugger_state *state; + unsigned long flags; + + state = container_of(co, struct fiq_debugger_state, console); + + if (!state->console_enable && !state->syslog_dumping) + return; + + fiq_debugger_uart_enable(state); + spin_lock_irqsave(&state->console_lock, flags); + while (count--) { + if (*s == '\n') + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, *s++); + } + fiq_debugger_uart_flush(state); + spin_unlock_irqrestore(&state->console_lock, flags); + fiq_debugger_uart_disable(state); +} + +static struct console fiq_debugger_console = { + .name = "ttyFIQ", + .device = fiq_debugger_console_device, + .write = fiq_debugger_console_write, + .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED, +}; + +int fiq_tty_open(struct tty_struct *tty, struct file *filp) +{ + int line = tty->index; + struct fiq_debugger_state **states = tty->driver->driver_state; + struct fiq_debugger_state *state = states[line]; + + return tty_port_open(&state->tty_port, tty, filp); +} + +void fiq_tty_close(struct tty_struct *tty, struct file *filp) +{ + tty_port_close(tty->port, tty, filp); +} + +int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + int i; + int line = tty->index; + struct fiq_debugger_state **states = tty->driver->driver_state; + struct fiq_debugger_state *state = states[line]; + + if (!state->console_enable) + return count; + + fiq_debugger_uart_enable(state); + spin_lock_irq(&state->console_lock); + for (i = 0; i < count; i++) + fiq_debugger_putc(state, *buf++); + spin_unlock_irq(&state->console_lock); + fiq_debugger_uart_disable(state); + + return count; +} + +int fiq_tty_write_room(struct tty_struct *tty) +{ + return 16; +} + +#ifdef CONFIG_CONSOLE_POLL +static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options) +{ + return 0; +} + +static int fiq_tty_poll_get_char(struct tty_driver *driver, int line) +{ + struct fiq_debugger_state **states = driver->driver_state; + struct fiq_debugger_state *state = states[line]; + int c = NO_POLL_CHAR; + + fiq_debugger_uart_enable(state); + if (fiq_debugger_have_fiq(state)) { + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + if (count > 0) { + c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + fiq_debugger_ringbuf_consume(state->tty_rbuf, 1); + } + } else { + c = fiq_debugger_getc(state); + if (c == FIQ_DEBUGGER_NO_CHAR) + c = NO_POLL_CHAR; + } + fiq_debugger_uart_disable(state); + + return c; +} + +static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch) +{ + struct fiq_debugger_state **states = driver->driver_state; + struct fiq_debugger_state *state = states[line]; + fiq_debugger_uart_enable(state); + fiq_debugger_putc(state, ch); + 
fiq_debugger_uart_disable(state);
+}
+#endif
+
+static const struct tty_port_operations fiq_tty_port_ops;
+
+static const struct tty_operations fiq_tty_driver_ops = {
+	.write = fiq_tty_write,
+	.write_room = fiq_tty_write_room,
+	.open = fiq_tty_open,
+	.close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_init = fiq_tty_poll_init,
+	.poll_get_char = fiq_tty_poll_get_char,
+	.poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
+static int fiq_debugger_tty_init(void)
+{
+	int ret;
+	struct fiq_debugger_state **states = NULL;
+
+	states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+	if (!states) {
+		pr_err("Failed to allocate fiq debugger state structures\n");
+		return -ENOMEM;
+	}
+
+	fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+	if (!fiq_tty_driver) {
+		pr_err("Failed to allocate fiq debugger tty\n");
+		ret = -ENOMEM;
+		goto err_free_state;
+	}
+
+	fiq_tty_driver->owner		= THIS_MODULE;
+	fiq_tty_driver->driver_name	= "fiq-debugger";
+	fiq_tty_driver->name		= "ttyFIQ";
+	fiq_tty_driver->type		= TTY_DRIVER_TYPE_SERIAL;
+	fiq_tty_driver->subtype		= SERIAL_TYPE_NORMAL;
+	fiq_tty_driver->init_termios	= tty_std_termios;
+	fiq_tty_driver->flags		= TTY_DRIVER_REAL_RAW |
+					  TTY_DRIVER_DYNAMIC_DEV;
+	fiq_tty_driver->driver_state	= states;
+
+	fiq_tty_driver->init_termios.c_cflag =
+					B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+	fiq_tty_driver->init_termios.c_ispeed = 115200;
+	fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+	tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+	ret = tty_register_driver(fiq_tty_driver);
+	if (ret) {
+		pr_err("Failed to register fiq tty: %d\n", ret);
+		goto err_free_tty;
+	}
+
+	pr_info("Registered FIQ tty driver\n");
+	return 0;
+
+err_free_tty:
+	put_tty_driver(fiq_tty_driver);
+	fiq_tty_driver = NULL;
+err_free_state:
+	kfree(states);
+	return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+	int ret;
+	struct device *tty_dev;
+	struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+	states[state->pdev->id] = state;
+
+	state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+	if (!state->tty_rbuf) {
+		pr_err("Failed to allocate fiq debugger ringbuf\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	tty_port_init(&state->tty_port);
+	state->tty_port.ops = &fiq_tty_port_ops;
+
+	tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
+					   state->pdev->id, &state->pdev->dev);
+	if (IS_ERR(tty_dev)) {
+		pr_err("Failed to register fiq debugger tty device\n");
+		ret = PTR_ERR(tty_dev);
+		goto err;
+	}
+
+	device_set_wakeup_capable(tty_dev, 1);
+
+	pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+	return 0;
+
+err:
+	fiq_debugger_ringbuf_free(state->tty_rbuf);
+	state->tty_rbuf = NULL;
+	return ret;
+}
+#endif
+
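For reference, a minimal (hypothetical) board-file hook-up of this driver: the probe below requires uart_getc/uart_putc in the platform data and looks the interrupts up by name ("fiq" or "uart_irq", plus optional "signal" and "wakeup"). The my_uart_* helpers and MY_UART_IRQ are board-specific placeholders, not part of this patch:

	#include <linux/platform_device.h>
	#include "fiq_debugger.h"

	static struct fiq_debugger_pdata my_fiq_pdata = {
		.uart_getc = my_uart_getc,	/* board-specific */
		.uart_putc = my_uart_putc,	/* board-specific */
	};

	static struct resource my_fiq_resources[] = {
		{
			.name	= "uart_irq",
			.start	= MY_UART_IRQ,
			.end	= MY_UART_IRQ,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device my_fiq_device = {
		.name		= "fiq_debugger",
		.id		= 0,	/* must be < MAX_FIQ_DEBUGGER_PORTS */
		.resource	= my_fiq_resources,
		.num_resources	= ARRAY_SIZE(my_fiq_resources),
		.dev.platform_data = &my_fiq_pdata,
	};

Calling platform_device_register(&my_fiq_device) from board init would then bind it to the "fiq_debugger" driver registered at the end of this file.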
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+	if (state->pdata->uart_dev_suspend)
+		return state->pdata->uart_dev_suspend(pdev);
+	return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+	if (state->pdata->uart_dev_resume)
+		return state->pdata->uart_dev_resume(pdev);
+	return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct fiq_debugger_state *state;
+	int fiq;
+	int uart_irq;
+
+	if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+		return -EINVAL;
+
+	if (!pdata->uart_getc || !pdata->uart_putc)
+		return -EINVAL;
+	if ((pdata->uart_enable && !pdata->uart_disable) ||
+	    (!pdata->uart_enable && pdata->uart_disable))
+		return -EINVAL;
+
+	fiq = platform_get_irq_byname(pdev, "fiq");
+	uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+	/* uart_irq mode and fiq mode are mutually exclusive, but one of them
+	 * is required */
+	if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+		return -EINVAL;
+	if (fiq >= 0 && !pdata->fiq_enable)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+	state->output.printf = fiq_debugger_printf;
+	setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired,
+		    (unsigned long)state);
+	state->pdata = pdata;
+	state->pdev = pdev;
+	state->no_sleep = initial_no_sleep;
+	state->debug_enable = initial_debug_enable;
+	state->console_enable = initial_console_enable;
+
+	state->fiq = fiq;
+	state->uart_irq = uart_irq;
+	state->signal_irq = platform_get_irq_byname(pdev, "signal");
+	state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+	INIT_WORK(&state->work, fiq_debugger_work);
+	spin_lock_init(&state->work_lock);
+
+	platform_set_drvdata(pdev, state);
+
+	spin_lock_init(&state->sleep_timer_lock);
+
+	if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state))
+		state->no_sleep = true;
+	state->ignore_next_wakeup_irq = !state->no_sleep;
+
+	wake_lock_init(&state->debugger_wake_lock,
+			WAKE_LOCK_SUSPEND, "serial-debug");
+
+	state->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(state->clk))
+		state->clk = NULL;
+
+	/* do not call pdata->uart_enable here since uart_init may still
+	 * need to do some initialization before uart_enable can work.
+	 * So, only try to manage the clock during init.
+	 */
+	if (state->clk)
+		clk_enable(state->clk);
+
+	if (pdata->uart_init) {
+		ret = pdata->uart_init(pdev);
+		if (ret)
+			goto err_uart_init;
+	}
+
+	fiq_debugger_printf_nfiq(state,
+				"<hit enter %sto activate fiq debugger>\n",
+				state->no_sleep ? "" : "twice ");
+
+#ifdef CONFIG_FIQ_GLUE
+	if (fiq_debugger_have_fiq(state)) {
+		state->handler.fiq = fiq_debugger_fiq;
+		state->handler.resume = fiq_debugger_resume;
+		ret = fiq_glue_register_handler(&state->handler);
+		if (ret) {
+			pr_err("%s: could not install fiq handler\n", __func__);
+			goto err_register_irq;
+		}
+
+		pdata->fiq_enable(pdev, state->fiq, 1);
+	} else
+#endif
+	{
+		ret = request_irq(state->uart_irq, fiq_debugger_uart_irq,
+				  IRQF_NO_SUSPEND, "debug", state);
+		if (ret) {
+			pr_err("%s: could not install irq handler\n", __func__);
+			goto err_register_irq;
+		}
+
+		/* for irq-only mode, we want this irq to wake us up, if it
+		 * can.
+ */ + enable_irq_wake(state->uart_irq); + } + + if (state->clk) + clk_disable(state->clk); + + if (state->signal_irq >= 0) { + ret = request_irq(state->signal_irq, fiq_debugger_signal_irq, + IRQF_TRIGGER_RISING, "debug-signal", state); + if (ret) + pr_err("serial_debugger: could not install signal_irq\n"); + } + + if (state->wakeup_irq >= 0) { + ret = request_irq(state->wakeup_irq, + fiq_debugger_wakeup_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_DISABLED, + "debug-wakeup", state); + if (ret) { + pr_err("serial_debugger: could not install wakeup irq\n"); + state->wakeup_irq = -1; + } else { + ret = enable_irq_wake(state->wakeup_irq); + if (ret) { + pr_err("serial_debugger: could not enable wakeup\n"); + state->wakeup_irq_no_set_wake = true; + } + } + } + if (state->no_sleep) + fiq_debugger_handle_wakeup(state); + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + spin_lock_init(&state->console_lock); + state->console = fiq_debugger_console; + state->console.index = pdev->id; + if (!console_set_on_cmdline) + add_preferred_console(state->console.name, + state->console.index, NULL); + register_console(&state->console); + fiq_debugger_tty_init_one(state); +#endif + return 0; + +err_register_irq: + if (pdata->uart_free) + pdata->uart_free(pdev); +err_uart_init: + if (state->clk) { + clk_disable(state->clk); + clk_put(state->clk); + } + wake_lock_destroy(&state->debugger_wake_lock); + platform_set_drvdata(pdev, NULL); + kfree(state); + return ret; +} + +static const struct dev_pm_ops fiq_debugger_dev_pm_ops = { + .suspend = fiq_debugger_dev_suspend, + .resume = fiq_debugger_dev_resume, +}; + +static struct platform_driver fiq_debugger_driver = { + .probe = fiq_debugger_probe, + .driver = { + .name = "fiq_debugger", + .pm = &fiq_debugger_dev_pm_ops, + }, +}; + +static int __init fiq_debugger_init(void) +{ +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + fiq_debugger_tty_init(); +#endif + return platform_driver_register(&fiq_debugger_driver); +} + +postcore_initcall(fiq_debugger_init); diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h new file mode 100644 index 000000000000..c9ec4f8db086 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger.h @@ -0,0 +1,64 @@ +/* + * drivers/staging/android/fiq_debugger/fiq_debugger.h + * + * Copyright (C) 2010 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ +#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ + +#include <linux/serial_core.h> + +#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR +#define FIQ_DEBUGGER_BREAK 0x00ff0100 + +#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq" +#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal" +#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup" + +/** + * struct fiq_debugger_pdata - fiq debugger platform data + * @uart_resume: used to restore uart state right before enabling + * the fiq. + * @uart_enable: Do the work necessary to communicate with the uart + * hw (enable clocks, etc.). This must be ref-counted. 
+ * @uart_disable: Do the work necessary to disable the uart hw + * (disable clocks, etc.). This must be ref-counted. + * @uart_dev_suspend: called during PM suspend, generally not needed + * for real fiq mode debugger. + * @uart_dev_resume: called during PM resume, generally not needed + * for real fiq mode debugger. + */ +struct fiq_debugger_pdata { + int (*uart_init)(struct platform_device *pdev); + void (*uart_free)(struct platform_device *pdev); + int (*uart_resume)(struct platform_device *pdev); + int (*uart_getc)(struct platform_device *pdev); + void (*uart_putc)(struct platform_device *pdev, unsigned int c); + void (*uart_flush)(struct platform_device *pdev); + void (*uart_enable)(struct platform_device *pdev); + void (*uart_disable)(struct platform_device *pdev); + + int (*uart_dev_suspend)(struct platform_device *pdev); + int (*uart_dev_resume)(struct platform_device *pdev); + + void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq, + bool enable); + void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq); + + void (*force_irq)(struct platform_device *pdev, unsigned int irq); + void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq); +}; + +#endif diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c new file mode 100644 index 000000000000..8b3e0137be1a --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/ptrace.h> +#include <linux/uaccess.h> + +#include <asm/stacktrace.h> + +#include "fiq_debugger_priv.h" + +static char *mode_name(unsigned cpsr) +{ + switch (cpsr & MODE_MASK) { + case USR_MODE: return "USR"; + case FIQ_MODE: return "FIQ"; + case IRQ_MODE: return "IRQ"; + case SVC_MODE: return "SVC"; + case ABT_MODE: return "ABT"; + case UND_MODE: return "UND"; + case SYSTEM_MODE: return "SYS"; + default: return "???"; + } +} + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " pc %08x cpsr %08x mode %s\n", + regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr)); +} + +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, + " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); + output->printf(output, + " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7); + output->printf(output, + " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", + regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp, + mode_name(regs->ARM_cpsr)); + output->printf(output, + " ip %08x sp %08x lr %08x pc %08x cpsr %08x\n", + regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc, + regs->ARM_cpsr); +} + +struct mode_regs { + unsigned long sp_svc; + unsigned long lr_svc; + unsigned long spsr_svc; + + unsigned long sp_abt; + unsigned long lr_abt; + unsigned long spsr_abt; + + unsigned long sp_und; + unsigned long lr_und; + unsigned long spsr_und; + + unsigned long sp_irq; + unsigned long lr_irq; + unsigned long spsr_irq; + + unsigned long r8_fiq; + unsigned long r9_fiq; + unsigned long r10_fiq; + unsigned long r11_fiq; + unsigned long r12_fiq; + unsigned long sp_fiq; + unsigned long lr_fiq; + unsigned long spsr_fiq; +}; + +static void __naked get_mode_regs(struct mode_regs *regs) +{ + asm volatile ( + "mrs r1, cpsr\n" + "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r8 - r14}\n" + "mrs r2, spsr\n" + "stmia r0!, {r2}\n" + "msr cpsr_c, r1\n" + "bx lr\n"); +} + + +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + struct mode_regs mode_regs; + unsigned long mode = regs->ARM_cpsr & MODE_MASK; + + fiq_debugger_dump_regs(output, regs); + get_mode_regs(&mode_regs); + + output->printf(output, + "%csvc: sp %08x lr %08x spsr %08x\n", + mode == SVC_MODE ? '*' : ' ', + mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); + output->printf(output, + "%cabt: sp %08x lr %08x spsr %08x\n", + mode == ABT_MODE ? '*' : ' ', + mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); + output->printf(output, + "%cund: sp %08x lr %08x spsr %08x\n", + mode == UND_MODE ? '*' : ' ', + mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); + output->printf(output, + "%cirq: sp %08x lr %08x spsr %08x\n", + mode == IRQ_MODE ? 
'*' : ' ', + mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); + output->printf(output, + "%cfiq: r8 %08x r9 %08x r10 %08x r11 %08x r12 %08x\n", + mode == FIQ_MODE ? '*' : ' ', + mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, + mode_regs.r11_fiq, mode_regs.r12_fiq); + output->printf(output, + " fiq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); +} + +struct stacktrace_state { + struct fiq_debugger_output *output; + unsigned int depth; +}; + +static int report_trace(struct stackframe *frame, void *d) +{ + struct stacktrace_state *sts = d; + + if (sts->depth) { + sts->output->printf(sts->output, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + frame->pc, frame->pc, frame->lr, frame->lr, + frame->sp, frame->fp); + sts->depth--; + return 0; + } + sts->output->printf(sts->output, " ...\n"); + + return sts->depth == 0; +} + +struct frame_tail { + struct frame_tail *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +static struct frame_tail *user_backtrace(struct fiq_debugger_output *output, + struct frame_tail *tail) +{ + struct frame_tail buftail[2]; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) { + output->printf(output, " invalid frame pointer %p\n", + tail); + return NULL; + } + if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) { + output->printf(output, + " failed to copy frame pointer %p\n", tail); + return NULL; + } + + output->printf(output, " %p\n", buftail[0].lr); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (tail >= buftail[0].fp) + return NULL; + + return buftail[0].fp-1; +} + +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp) +{ + struct frame_tail *tail; + struct thread_info *real_thread_info = THREAD_INFO(ssp); + struct stacktrace_state sts; + + sts.depth = depth; + sts.output = output; + *current_thread_info() = *real_thread_info; + + if (!current) + output->printf(output, "current NULL\n"); + else + output->printf(output, "pid: %d comm: %s\n", + current->pid, current->comm); + fiq_debugger_dump_regs(output, regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->ARM_fp; + frame.sp = regs->ARM_sp; + frame.lr = regs->ARM_lr; + frame.pc = regs->ARM_pc; + output->printf(output, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr, + regs->ARM_sp, regs->ARM_fp); + walk_stackframe(&frame, report_trace, &sts); + return; + } + + tail = ((struct frame_tail *) regs->ARM_fp) - 1; + while (depth-- && tail && !((unsigned long) tail & 3)) + tail = user_backtrace(output, tail); +} diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c new file mode 100644 index 000000000000..99c6584fcfa5 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/ptrace.h> +#include <asm/stacktrace.h> + +#include "fiq_debugger_priv.h" + +static char *mode_name(const struct pt_regs *regs) +{ + if (compat_user_mode(regs)) { + return "USR"; + } else { + switch (processor_mode(regs)) { + case PSR_MODE_EL0t: return "EL0t"; + case PSR_MODE_EL1t: return "EL1t"; + case PSR_MODE_EL1h: return "EL1h"; + case PSR_MODE_EL2t: return "EL2t"; + case PSR_MODE_EL2h: return "EL2h"; + default: return "???"; + } + } +} + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " pc %016lx cpsr %08lx mode %s\n", + regs->pc, regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs->compat_usr(0), regs->compat_usr(1), + regs->compat_usr(2), regs->compat_usr(3)); + output->printf(output, " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs->compat_usr(4), regs->compat_usr(5), + regs->compat_usr(6), regs->compat_usr(7)); + output->printf(output, " r8 %08x r9 %08x r10 %08x r11 %08x\n", + regs->compat_usr(8), regs->compat_usr(9), + regs->compat_usr(10), regs->compat_usr(11)); + output->printf(output, " ip %08x sp %08x lr %08x pc %08x\n", + regs->compat_usr(12), regs->compat_sp, + regs->compat_lr, regs->pc); + output->printf(output, " cpsr %08x (%s)\n", + regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + + output->printf(output, " x0 %016lx x1 %016lx\n", + regs->regs[0], regs->regs[1]); + output->printf(output, " x2 %016lx x3 %016lx\n", + regs->regs[2], regs->regs[3]); + output->printf(output, " x4 %016lx x5 %016lx\n", + regs->regs[4], regs->regs[5]); + output->printf(output, " x6 %016lx x7 %016lx\n", + regs->regs[6], regs->regs[7]); + output->printf(output, " x8 %016lx x9 %016lx\n", + regs->regs[8], regs->regs[9]); + output->printf(output, " x10 %016lx x11 %016lx\n", + regs->regs[10], regs->regs[11]); + output->printf(output, " x12 %016lx x13 %016lx\n", + regs->regs[12], regs->regs[13]); + output->printf(output, " x14 %016lx x15 %016lx\n", + regs->regs[14], regs->regs[15]); + output->printf(output, " x16 %016lx x17 %016lx\n", + regs->regs[16], regs->regs[17]); + output->printf(output, " x18 %016lx x19 %016lx\n", + regs->regs[18], regs->regs[19]); + output->printf(output, " x20 %016lx x21 %016lx\n", + regs->regs[20], regs->regs[21]); + output->printf(output, " x22 %016lx x23 %016lx\n", + regs->regs[22], regs->regs[23]); + output->printf(output, " x24 %016lx x25 %016lx\n", + regs->regs[24], regs->regs[25]); + output->printf(output, " x26 %016lx x27 %016lx\n", + regs->regs[26], regs->regs[27]); + output->printf(output, " x28 %016lx x29 %016lx\n", + regs->regs[28], regs->regs[29]); + output->printf(output, " x30 %016lx sp %016lx\n", + regs->regs[30], regs->sp); + output->printf(output, " pc %016lx cpsr %08x (%s)\n", + regs->pc, regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + if (compat_user_mode(regs)) + fiq_debugger_dump_regs_aarch32(output, regs); + else + fiq_debugger_dump_regs_aarch64(output, regs); +} + 
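[Editor's aside] The register and stack dumpers in fiq_debugger_arm.c and fiq_debugger_arm64.c are deliberately transport-agnostic: every byte goes out through the printf callback on struct fiq_debugger_output (declared in fiq_debugger_priv.h further below). As a rough sketch of that contract, a consumer could route the dumps to the kernel log as follows; the fiq_debugger_* names come from this patch, while printk_output and dump_cpu_state are illustrative assumptions, not part of the patch:

	#include <linux/kernel.h>
	#include <linux/printk.h>

	#include "fiq_debugger_priv.h"

	/* Illustrative sink: forward debugger output to the kernel log. */
	static void printk_output_printf(struct fiq_debugger_output *output,
					 const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		vprintk(fmt, ap);
		va_end(ap);
	}

	static struct fiq_debugger_output printk_output = {
		.printf = printk_output_printf,
	};

	/*
	 * Hypothetical caller, e.g. invoked from an exception path with the
	 * saved pt_regs and the SVC/EL1 stack pointer of the stopped CPU.
	 */
	static void dump_cpu_state(const struct pt_regs *regs, void *svc_sp)
	{
		fiq_debugger_dump_pc(&printk_output, regs);
		fiq_debugger_dump_allregs(&printk_output, regs);
		fiq_debugger_dump_stacktrace(&printk_output, regs, 100, svc_sp);
	}

fiq_watchdog.c later in this patch uses exactly this pattern, with ramoops_console_write_buf() as the sink instead of printk.
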
+#define READ_SPECIAL_REG(x) ({ \ + u64 val; \ + asm volatile ("mrs %0, " # x : "=r"(val)); \ + val; \ +}) + +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + u32 pstate = READ_SPECIAL_REG(CurrentEl); + bool in_el2 = (pstate & PSR_MODE_MASK) >= PSR_MODE_EL2t; + + fiq_debugger_dump_regs(output, regs); + + output->printf(output, " sp_el0 %016lx\n", + READ_SPECIAL_REG(sp_el0)); + + if (in_el2) + output->printf(output, " sp_el1 %016lx\n", + READ_SPECIAL_REG(sp_el1)); + + output->printf(output, " elr_el1 %016lx\n", + READ_SPECIAL_REG(elr_el1)); + + output->printf(output, " spsr_el1 %08lx\n", + READ_SPECIAL_REG(spsr_el1)); + + if (in_el2) { + output->printf(output, " spsr_irq %08lx\n", + READ_SPECIAL_REG(spsr_irq)); + output->printf(output, " spsr_abt %08lx\n", + READ_SPECIAL_REG(spsr_abt)); + output->printf(output, " spsr_und %08lx\n", + READ_SPECIAL_REG(spsr_und)); + output->printf(output, " spsr_fiq %08lx\n", + READ_SPECIAL_REG(spsr_fiq)); + output->printf(output, " elr_el2 %016lx\n", + READ_SPECIAL_REG(elr_el2)); + output->printf(output, " spsr_el2 %08lx\n", + READ_SPECIAL_REG(spsr_el2)); + } +} + +struct stacktrace_state { + struct fiq_debugger_output *output; + unsigned int depth; +}; + +static int report_trace(struct stackframe *frame, void *d) +{ + struct stacktrace_state *sts = d; + + if (sts->depth) { + sts->output->printf(sts->output, "%pF:\n", frame->pc); + sts->output->printf(sts->output, + " pc %016lx sp %016lx fp %016lx\n", + frame->pc, frame->sp, frame->fp); + sts->depth--; + return 0; + } + sts->output->printf(sts->output, " ...\n"); + + return sts->depth == 0; +} + +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp) +{ + struct thread_info *real_thread_info = THREAD_INFO(ssp); + struct stacktrace_state sts; + + sts.depth = depth; + sts.output = output; + *current_thread_info() = *real_thread_info; + + if (!current) + output->printf(output, "current NULL\n"); + else + output->printf(output, "pid: %d comm: %s\n", + current->pid, current->comm); + fiq_debugger_dump_regs(output, regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->regs[29]; + frame.sp = regs->sp; + frame.pc = regs->pc; + output->printf(output, "\n"); + walk_stackframe(&frame, report_trace, &sts); + } +} diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h new file mode 100644 index 000000000000..d5d051f727a8 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _FIQ_DEBUGGER_PRIV_H_ +#define _FIQ_DEBUGGER_PRIV_H_ + +#define THREAD_INFO(sp) ((struct thread_info *) \ + ((unsigned long)(sp) & ~(THREAD_SIZE - 1))) + +struct fiq_debugger_output { + void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...); +}; + +struct pt_regs; + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp); + +#endif diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h new file mode 100644 index 000000000000..10c3c5d09098 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h @@ -0,0 +1,94 @@ +/* + * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h + * + * simple lockless ringbuffer + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> + +struct fiq_debugger_ringbuf { + int len; + int head; + int tail; + u8 buf[]; +}; + + +static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len) +{ + struct fiq_debugger_ringbuf *rbuf; + + rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL); + if (rbuf == NULL) + return NULL; + + rbuf->len = len; + rbuf->head = 0; + rbuf->tail = 0; + smp_mb(); + + return rbuf; +} + +static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf) +{ + kfree(rbuf); +} + +static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf) +{ + int level = rbuf->head - rbuf->tail; + + if (level < 0) + level = rbuf->len + level; + + return level; +} + +static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf) +{ + return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1; +} + +static inline u8 +fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i) +{ + return rbuf->buf[(rbuf->tail + i) % rbuf->len]; +} + +static inline int +fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count) +{ + count = min(count, fiq_debugger_ringbuf_level(rbuf)); + + rbuf->tail = (rbuf->tail + count) % rbuf->len; + smp_mb(); + + return count; +} + +static inline int +fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum) +{ + if (fiq_debugger_ringbuf_room(rbuf) == 0) + return 0; + + rbuf->buf[rbuf->head] = datum; + smp_mb(); + rbuf->head = (rbuf->head + 1) % rbuf->len; + smp_mb(); + + return 1; +} diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c new file mode 100644 index 000000000000..194b54138417 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2014 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/pstore_ram.h> + +#include "fiq_watchdog.h" +#include "fiq_debugger_priv.h" + +static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock); + +static void fiq_watchdog_printf(struct fiq_debugger_output *output, + const char *fmt, ...) +{ + char buf[256]; + va_list ap; + int len; + + va_start(ap, fmt); + len = vscnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + ramoops_console_write_buf(buf, len); +} + +struct fiq_debugger_output fiq_watchdog_output = { + .printf = fiq_watchdog_printf, +}; + +void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp) +{ + char msg[24]; + int len; + + raw_spin_lock(&fiq_watchdog_lock); + + len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n", + THREAD_INFO(svc_sp)->cpu); + ramoops_console_write_buf(msg, len); + + fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp); + + raw_spin_unlock(&fiq_watchdog_lock); +} diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h new file mode 100644 index 000000000000..c6b507f8d976 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _FIQ_WATCHDOG_H_ +#define _FIQ_WATCHDOG_H_ + +void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp); + +#endif diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig new file mode 100644 index 000000000000..0f8fec1f84e5 --- /dev/null +++ b/drivers/staging/android/ion/Kconfig @@ -0,0 +1,35 @@ +menuconfig ION + bool "Ion Memory Manager" + depends on HAVE_MEMBLOCK + select GENERIC_ALLOCATOR + select DMA_SHARED_BUFFER + ---help--- + Choose this option to enable the ION Memory Manager, + used by Android to efficiently allocate buffers + from userspace that can be shared between drivers. + If you're not using Android it's probably safe to + say N here. + +config ION_TEST + tristate "Ion Test Device" + depends on ION + help + Choose this option to create a device that can be used to test the + kernel and device side ION functions. + +config ION_DUMMY + bool "Dummy Ion driver" + depends on ION + help + Provides a dummy ION driver that registers the + /dev/ion device and some basic heaps. This can + be used for testing the ION infrastructure if + one doesn't have access to hardware drivers that + use ION. + +config ION_TEGRA + tristate "Ion for Tegra" + depends on ARCH_TEGRA && ION + help + Choose this option if you wish to use ion on an NVIDIA Tegra. 
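[Editor's aside] Before the ION implementation that follows, it helps to see the user-space flow its ioctls serve: allocate a buffer with ION_IOC_ALLOC, convert the returned handle into a dma-buf fd with ION_IOC_SHARE, mmap that fd, then drop the handle with ION_IOC_FREE. A minimal sketch, assuming the uapi definitions from drivers/staging/android/uapi/ion.h (struct ion_allocation_data, struct ion_fd_data, struct ion_handle_data) and, purely for illustration, a heap registered with id 0:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#include "ion.h"	/* uapi header: ION_IOC_* and the ioctl arg structs */

	int ion_demo(void)
	{
		struct ion_allocation_data alloc = {
			.len = 4096,
			.align = 4096,
			.heap_id_mask = 1 << 0,	/* assumption: a heap with id 0 */
			.flags = 0,
		};
		struct ion_fd_data share = { 0 };
		struct ion_handle_data free_arg = { 0 };
		void *p;
		int fd = open("/dev/ion", O_RDWR);

		if (fd < 0 || ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)
			return -1;

		/* Turn the opaque handle into a dma-buf fd that can be
		 * mmapped here or passed to another process or driver. */
		share.handle = alloc.handle;
		if (ioctl(fd, ION_IOC_SHARE, &share) < 0)
			return -1;

		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			 share.fd, 0);
		if (p != MAP_FAILED)
			munmap(p, 4096);

		free_arg.handle = alloc.handle;
		ioctl(fd, ION_IOC_FREE, &free_arg);
		close(share.fd);
		close(fd);
		return 0;
	}

ION_IOC_MAP takes the same path as ION_IOC_SHARE in ion_ioctl() below, and 32-bit callers on a 64-bit kernel go through the compat_ion.c translation layer that follows.
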
+ diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile new file mode 100644 index 000000000000..b56fd2bf2b4f --- /dev/null +++ b/drivers/staging/android/ion/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \ + ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o +obj-$(CONFIG_ION_TEST) += ion_test.o +ifdef CONFIG_COMPAT +obj-$(CONFIG_ION) += compat_ion.o +endif + +obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o +obj-$(CONFIG_ION_TEGRA) += tegra/ + diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c new file mode 100644 index 000000000000..a5e8c408fd78 --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.c @@ -0,0 +1,195 @@ +/* + * drivers/gpu/ion/compat_ion.c + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/compat.h> +#include <linux/fs.h> +#include <linux/uaccess.h> + +#include "ion.h" +#include "compat_ion.h" + +/* See drivers/staging/android/uapi/ion.h for the definition of these structs */ +struct compat_ion_allocation_data { + compat_size_t len; + compat_size_t align; + compat_uint_t heap_id_mask; + compat_uint_t flags; + compat_int_t handle; +}; + +struct compat_ion_custom_data { + compat_uint_t cmd; + compat_ulong_t arg; +}; + +struct compat_ion_handle_data { + compat_int_t handle; +}; + +#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct compat_ion_allocation_data) +#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \ + struct compat_ion_handle_data) +#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ + struct compat_ion_custom_data) + +static int compat_get_ion_allocation_data( + struct compat_ion_allocation_data __user *data32, + struct ion_allocation_data __user *data) +{ + compat_size_t s; + compat_uint_t u; + compat_int_t i; + int err; + + err = get_user(s, &data32->len); + err |= put_user(s, &data->len); + err |= get_user(s, &data32->align); + err |= put_user(s, &data->align); + err |= get_user(u, &data32->heap_id_mask); + err |= put_user(u, &data->heap_id_mask); + err |= get_user(u, &data32->flags); + err |= put_user(u, &data->flags); + err |= get_user(i, &data32->handle); + err |= put_user(i, &data->handle); + + return err; +} + +static int compat_get_ion_handle_data( + struct compat_ion_handle_data __user *data32, + struct ion_handle_data __user *data) +{ + compat_int_t i; + int err; + + err = get_user(i, &data32->handle); + err |= put_user(i, &data->handle); + + return err; +} + +static int compat_put_ion_allocation_data( + struct compat_ion_allocation_data __user *data32, + struct ion_allocation_data __user *data) +{ + compat_size_t s; + compat_uint_t u; + compat_int_t i; + int err; + + err = get_user(s, &data->len); + err |= put_user(s, &data32->len); + err |= get_user(s, &data->align); + err |= put_user(s, &data32->align); + err |= get_user(u, &data->heap_id_mask); + err |= put_user(u, &data32->heap_id_mask); + err |= get_user(u, &data->flags); + err |= put_user(u, &data32->flags); + err |= get_user(i, &data->handle); + err |= put_user(i, 
&data32->handle); + + return err; +} + +static int compat_get_ion_custom_data( + struct compat_ion_custom_data __user *data32, + struct ion_custom_data __user *data) +{ + compat_uint_t cmd; + compat_ulong_t arg; + int err; + + err = get_user(cmd, &data32->cmd); + err |= put_user(cmd, &data->cmd); + err |= get_user(arg, &data32->arg); + err |= put_user(arg, &data->arg); + + return err; +}; + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long ret; + + if (!filp->f_op || !filp->f_op->unlocked_ioctl) + return -ENOTTY; + + switch (cmd) { + case COMPAT_ION_IOC_ALLOC: + { + struct compat_ion_allocation_data __user *data32; + struct ion_allocation_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_allocation_data(data32, data); + if (err) + return err; + ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, + (unsigned long)data); + err = compat_put_ion_allocation_data(data32, data); + return ret ? ret : err; + } + case COMPAT_ION_IOC_FREE: + { + struct compat_ion_handle_data __user *data32; + struct ion_handle_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_handle_data(data32, data); + if (err) + return err; + + return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, + (unsigned long)data); + } + case COMPAT_ION_IOC_CUSTOM: { + struct compat_ion_custom_data __user *data32; + struct ion_custom_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_custom_data(data32, data); + if (err) + return err; + + return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, + (unsigned long)data); + } + case ION_IOC_SHARE: + case ION_IOC_MAP: + case ION_IOC_IMPORT: + case ION_IOC_SYNC: + return filp->f_op->unlocked_ioctl(filp, cmd, + (unsigned long)compat_ptr(arg)); + default: + return -ENOIOCTLCMD; + } +} diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h new file mode 100644 index 000000000000..3a9c8c08c240 --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.h @@ -0,0 +1,30 @@ +/* + + * drivers/gpu/ion/compat_ion.h + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_COMPAT_ION_H +#define _LINUX_COMPAT_ION_H + +#if IS_ENABLED(CONFIG_COMPAT) + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +#else + +#define compat_ion_ioctl NULL + +#endif /* CONFIG_COMPAT */ +#endif /* _LINUX_COMPAT_ION_H */ diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c new file mode 100644 index 000000000000..698e363be551 --- /dev/null +++ b/drivers/staging/android/ion/ion.c @@ -0,0 +1,1644 @@ +/* + + * drivers/gpu/ion/ion.c + * + * Copyright (C) 2011 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/file.h> +#include <linux/freezer.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/miscdevice.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <linux/rbtree.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +#include <linux/debugfs.h> +#include <linux/dma-buf.h> +#include <linux/idr.h> + +#include "ion.h" +#include "ion_priv.h" +#include "compat_ion.h" + +/** + * struct ion_device - the metadata of the ion device node + * @dev: the actual misc device + * @buffers: an rb tree of all the existing buffers + * @buffer_lock: lock protecting the tree of buffers + * @lock: rwsem protecting the tree of heaps and clients + * @heaps: list of all the heaps in the system + * @clients: an rb tree of all the clients in the system + */ +struct ion_device { + struct miscdevice dev; + struct rb_root buffers; + struct mutex buffer_lock; + struct rw_semaphore lock; + struct plist_head heaps; + long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, + unsigned long arg); + struct rb_root clients; + struct dentry *debug_root; + struct dentry *heaps_debug_root; + struct dentry *clients_debug_root; +}; + +/** + * struct ion_client - a process/hw block local address space + * @node: node in the tree of all clients + * @dev: backpointer to ion device + * @handles: an rb tree of all the handles in this client + * @idr: an idr space for allocating handle ids + * @lock: lock protecting the tree of handles + * @name: used for debugging + * @display_name: used for debugging (unique version of @name) + * @display_serial: used for debugging (to make display_name unique) + * @task: used for debugging + * + * A client represents a list of buffers this client may access. + * The mutex stored here is used to protect both the tree of handles + * and the handles themselves, and should be held while modifying either. + */ +struct ion_client { + struct rb_node node; + struct ion_device *dev; + struct rb_root handles; + struct idr idr; + struct mutex lock; + const char *name; + char *display_name; + int display_serial; + struct task_struct *task; + pid_t pid; + struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref: reference count + * @client: back pointer to the client the buffer resides in + * @buffer: pointer to the buffer + * @node: node in the client's handle rbtree + * @kmap_cnt: count of times this client has mapped to kernel + * @id: client-unique id allocated by client->idr + * + * Modifications to node and kmap_cnt should be protected by the + * lock in the client. Other fields are never changed after initialization. 
+ */ +struct ion_handle { + struct kref ref; + struct ion_client *client; + struct ion_buffer *buffer; + struct rb_node node; + unsigned int kmap_cnt; + int id; +}; + +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) +{ + return (buffer->flags & ION_FLAG_CACHED) && + !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); +} + +bool ion_buffer_cached(struct ion_buffer *buffer) +{ + return !!(buffer->flags & ION_FLAG_CACHED); +} + +static inline struct page *ion_buffer_page(struct page *page) +{ + return (struct page *)((unsigned long)page & ~(1UL)); +} + +static inline bool ion_buffer_page_is_dirty(struct page *page) +{ + return !!((unsigned long)page & 1UL); +} + +static inline void ion_buffer_page_dirty(struct page **page) +{ + *page = (struct page *)((unsigned long)(*page) | 1UL); +} + +static inline void ion_buffer_page_clean(struct page **page) +{ + *page = (struct page *)((unsigned long)(*page) & ~(1UL)); +} + +/* this function should only be called while dev->lock is held */ +static void ion_buffer_add(struct ion_device *dev, + struct ion_buffer *buffer) +{ + struct rb_node **p = &dev->buffers.rb_node; + struct rb_node *parent = NULL; + struct ion_buffer *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_buffer, node); + + if (buffer < entry) { + p = &(*p)->rb_left; + } else if (buffer > entry) { + p = &(*p)->rb_right; + } else { + pr_err("%s: buffer already found.", __func__); + BUG(); + } + } + + rb_link_node(&buffer->node, parent, p); + rb_insert_color(&buffer->node, &dev->buffers); +} + +/* this function should only be called while dev->lock is held */ +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, + struct ion_device *dev, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + struct ion_buffer *buffer; + struct sg_table *table; + struct scatterlist *sg; + int i, ret; + + buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + buffer->heap = heap; + buffer->flags = flags; + kref_init(&buffer->ref); + + ret = heap->ops->allocate(heap, buffer, len, align, flags); + + if (ret) { + if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) + goto err2; + + ion_heap_freelist_drain(heap, 0); + ret = heap->ops->allocate(heap, buffer, len, align, + flags); + if (ret) + goto err2; + } + + buffer->dev = dev; + buffer->size = len; + + table = heap->ops->map_dma(heap, buffer); + if (WARN_ONCE(table == NULL, + "heap->ops->map_dma should return ERR_PTR on error")) + table = ERR_PTR(-EINVAL); + if (IS_ERR(table)) { + heap->ops->free(buffer); + kfree(buffer); + return ERR_PTR(PTR_ERR(table)); + } + buffer->sg_table = table; + if (ion_buffer_fault_user_mappings(buffer)) { + int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct scatterlist *sg; + int i, j, k = 0; + + buffer->pages = vmalloc(sizeof(struct page *) * num_pages); + if (!buffer->pages) { + ret = -ENOMEM; + goto err1; + } + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + + for (j = 0; j < sg->length / PAGE_SIZE; j++) + buffer->pages[k++] = page++; + } + + if (ret) + goto err; + } + + buffer->dev = dev; + buffer->size = len; + INIT_LIST_HEAD(&buffer->vmas); + mutex_init(&buffer->lock); + /* this will set up dma addresses for the sglist -- it is not + technically correct as per the dma api -- a specific + device isn't really taking ownership here. However, in practice on + our systems the only dma_address space is physical addresses. 
+ Additionally, we can't afford the overhead of invalidating every + allocation via dma_map_sg. The implicit contract here is that + memory coming from the heaps is ready for dma, i.e. if it has a + cached mapping that mapping has been invalidated */ + for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) + sg_dma_address(sg) = sg_phys(sg); + mutex_lock(&dev->buffer_lock); + ion_buffer_add(dev, buffer); + mutex_unlock(&dev->buffer_lock); + return buffer; + +err: + heap->ops->unmap_dma(heap, buffer); + heap->ops->free(buffer); +err1: + if (buffer->pages) + vfree(buffer->pages); +err2: + kfree(buffer); + return ERR_PTR(ret); +} + +void ion_buffer_destroy(struct ion_buffer *buffer) +{ + if (WARN_ON(buffer->kmap_cnt > 0)) + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->heap->ops->unmap_dma(buffer->heap, buffer); + buffer->heap->ops->free(buffer); + if (buffer->pages) + vfree(buffer->pages); + kfree(buffer); +} + +static void _ion_buffer_destroy(struct kref *kref) +{ + struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); + struct ion_heap *heap = buffer->heap; + struct ion_device *dev = buffer->dev; + + mutex_lock(&dev->buffer_lock); + rb_erase(&buffer->node, &dev->buffers); + mutex_unlock(&dev->buffer_lock); + + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + ion_heap_freelist_add(heap, buffer); + else + ion_buffer_destroy(buffer); +} + +static void ion_buffer_get(struct ion_buffer *buffer) +{ + kref_get(&buffer->ref); +} + +static int ion_buffer_put(struct ion_buffer *buffer) +{ + return kref_put(&buffer->ref, _ion_buffer_destroy); +} + +static void ion_buffer_add_to_handle(struct ion_buffer *buffer) +{ + mutex_lock(&buffer->lock); + buffer->handle_count++; + mutex_unlock(&buffer->lock); +} + +static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) +{ + /* + * when a buffer is removed from a handle, if it is not in + * any other handles, copy the taskcomm and the pid of the + * process it's being removed from into the buffer. At this + * point there will be no way to track what processes this buffer is + * being used by, it only exists as a dma_buf file descriptor. 
+ * The taskcomm and pid can provide a debug hint as to where this fd + * is in the system + */ + mutex_lock(&buffer->lock); + buffer->handle_count--; + BUG_ON(buffer->handle_count < 0); + if (!buffer->handle_count) { + struct task_struct *task; + + task = current->group_leader; + get_task_comm(buffer->task_comm, task); + buffer->pid = task_pid_nr(task); + } + mutex_unlock(&buffer->lock); +} + +static struct ion_handle *ion_handle_create(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle; + + handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); + if (!handle) + return ERR_PTR(-ENOMEM); + kref_init(&handle->ref); + RB_CLEAR_NODE(&handle->node); + handle->client = client; + ion_buffer_get(buffer); + ion_buffer_add_to_handle(buffer); + handle->buffer = buffer; + + return handle; +} + +static void ion_handle_kmap_put(struct ion_handle *); + +static void ion_handle_destroy(struct kref *kref) +{ + struct ion_handle *handle = container_of(kref, struct ion_handle, ref); + struct ion_client *client = handle->client; + struct ion_buffer *buffer = handle->buffer; + + mutex_lock(&buffer->lock); + while (handle->kmap_cnt) + ion_handle_kmap_put(handle); + mutex_unlock(&buffer->lock); + + idr_remove(&client->idr, handle->id); + if (!RB_EMPTY_NODE(&handle->node)) + rb_erase(&handle->node, &client->handles); + + ion_buffer_remove_from_handle(buffer); + ion_buffer_put(buffer); + + kfree(handle); +} + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) +{ + return handle->buffer; +} + +static void ion_handle_get(struct ion_handle *handle) +{ + kref_get(&handle->ref); +} + +static int ion_handle_put(struct ion_handle *handle) +{ + struct ion_client *client = handle->client; + int ret; + + mutex_lock(&client->lock); + ret = kref_put(&handle->ref, ion_handle_destroy); + mutex_unlock(&client->lock); + + return ret; +} + +static struct ion_handle *ion_handle_lookup(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct rb_node *n = client->handles.rb_node; + + while (n) { + struct ion_handle *entry = rb_entry(n, struct ion_handle, node); + + if (buffer < entry->buffer) + n = n->rb_left; + else if (buffer > entry->buffer) + n = n->rb_right; + else + return entry; + } + return ERR_PTR(-EINVAL); +} + +static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id) +{ + struct ion_handle *handle; + + mutex_lock(&client->lock); + handle = idr_find(&client->idr, id); + if (handle) + ion_handle_get(handle); + mutex_unlock(&client->lock); + + return handle ? 
handle : ERR_PTR(-EINVAL); +} + +static bool ion_handle_validate(struct ion_client *client, + struct ion_handle *handle) +{ + WARN_ON(!mutex_is_locked(&client->lock)); + return (idr_find(&client->idr, handle->id) == handle); +} + +static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) +{ + int id; + struct rb_node **p = &client->handles.rb_node; + struct rb_node *parent = NULL; + struct ion_handle *entry; + + id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); + if (id < 0) + return id; + + handle->id = id; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_handle, node); + + if (handle->buffer < entry->buffer) + p = &(*p)->rb_left; + else if (handle->buffer > entry->buffer) + p = &(*p)->rb_right; + else + WARN(1, "%s: buffer already found.", __func__); + } + + rb_link_node(&handle->node, parent, p); + rb_insert_color(&handle->node, &client->handles); + + return 0; +} + +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_id_mask, + unsigned int flags) +{ + struct ion_handle *handle; + struct ion_device *dev = client->dev; + struct ion_buffer *buffer = NULL; + struct ion_heap *heap; + int ret; + + pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, + len, align, heap_id_mask, flags); + /* + * traverse the list of heaps available in this system in priority + * order. If the heap type is supported by the client, and matches the + * request of the caller allocate from it. Repeat until allocate has + * succeeded or all heaps have been tried + */ + len = PAGE_ALIGN(len); + + if (!len) + return ERR_PTR(-EINVAL); + + down_read(&dev->lock); + plist_for_each_entry(heap, &dev->heaps, node) { + /* if the caller didn't specify this heap id */ + if (!((1 << heap->id) & heap_id_mask)) + continue; + buffer = ion_buffer_create(heap, dev, len, align, flags); + if (!IS_ERR(buffer)) + break; + } + up_read(&dev->lock); + + if (buffer == NULL) + return ERR_PTR(-ENODEV); + + if (IS_ERR(buffer)) + return ERR_PTR(PTR_ERR(buffer)); + + handle = ion_handle_create(client, buffer); + + /* + * ion_buffer_create will create a buffer with a ref_cnt of 1, + * and ion_handle_create will take a second reference, drop one here + */ + ion_buffer_put(buffer); + + if (IS_ERR(handle)) + return handle; + + mutex_lock(&client->lock); + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { + ion_handle_put(handle); + handle = ERR_PTR(ret); + } + + return handle; +} +EXPORT_SYMBOL(ion_alloc); + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ + bool valid_handle; + + BUG_ON(client != handle->client); + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to free.\n", __func__); + mutex_unlock(&client->lock); + return; + } + mutex_unlock(&client->lock); + ion_handle_put(handle); +} +EXPORT_SYMBOL(ion_free); + +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_buffer *buffer; + int ret; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + mutex_unlock(&client->lock); + return -EINVAL; + } + + buffer = handle->buffer; + + if (!buffer->heap->ops->phys) { + pr_err("%s: ion_phys is not implemented by this heap.\n", + __func__); + mutex_unlock(&client->lock); + return -ENODEV; + } + mutex_unlock(&client->lock); + ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); + return 
ret; +} +EXPORT_SYMBOL(ion_phys); + +static void *ion_buffer_kmap_get(struct ion_buffer *buffer) +{ + void *vaddr; + + if (buffer->kmap_cnt) { + buffer->kmap_cnt++; + return buffer->vaddr; + } + vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); + if (WARN_ONCE(vaddr == NULL, + "heap->ops->map_kernel should return ERR_PTR on error")) + return ERR_PTR(-EINVAL); + if (IS_ERR(vaddr)) + return vaddr; + buffer->vaddr = vaddr; + buffer->kmap_cnt++; + return vaddr; +} + +static void *ion_handle_kmap_get(struct ion_handle *handle) +{ + struct ion_buffer *buffer = handle->buffer; + void *vaddr; + + if (handle->kmap_cnt) { + handle->kmap_cnt++; + return buffer->vaddr; + } + vaddr = ion_buffer_kmap_get(buffer); + if (IS_ERR(vaddr)) + return vaddr; + handle->kmap_cnt++; + return vaddr; +} + +static void ion_buffer_kmap_put(struct ion_buffer *buffer) +{ + buffer->kmap_cnt--; + if (!buffer->kmap_cnt) { + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->vaddr = NULL; + } +} + +static void ion_handle_kmap_put(struct ion_handle *handle) +{ + struct ion_buffer *buffer = handle->buffer; + + handle->kmap_cnt--; + if (!handle->kmap_cnt) + ion_buffer_kmap_put(buffer); +} + +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + void *vaddr; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_kernel.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + + buffer = handle->buffer; + + if (!handle->buffer->heap->ops->map_kernel) { + pr_err("%s: map_kernel is not implemented by this heap.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&buffer->lock); + vaddr = ion_handle_kmap_get(handle); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return vaddr; +} +EXPORT_SYMBOL(ion_map_kernel); + +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + mutex_lock(&buffer->lock); + ion_handle_kmap_put(handle); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_unmap_kernel); + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ + struct ion_client *client = s->private; + struct rb_node *n; + size_t sizes[ION_NUM_HEAP_IDS] = {0}; + const char *names[ION_NUM_HEAP_IDS] = {NULL}; + int i; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + unsigned int id = handle->buffer->heap->id; + + if (!names[id]) + names[id] = handle->buffer->heap->name; + sizes[id] += handle->buffer->size; + } + mutex_unlock(&client->lock); + + seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); + for (i = 0; i < ION_NUM_HEAP_IDS; i++) { + if (!names[i]) + continue; + seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); + } + return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { + .open = ion_debug_client_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ion_get_client_serial(const struct rb_root *root, + const unsigned char *name) +{ + int serial = -1; + struct rb_node *node; + + for (node = rb_first(root); node; node = 
rb_next(node)) { + struct ion_client *client = rb_entry(node, struct ion_client, + node); + + if (strcmp(client->name, name)) + continue; + serial = max(serial, client->display_serial); + } + return serial + 1; +} + +struct ion_client *ion_client_create(struct ion_device *dev, + const char *name) +{ + struct ion_client *client; + struct task_struct *task; + struct rb_node **p; + struct rb_node *parent = NULL; + struct ion_client *entry; + pid_t pid; + + if (!name) { + pr_err("%s: Name cannot be null\n", __func__); + return ERR_PTR(-EINVAL); + } + + get_task_struct(current->group_leader); + task_lock(current->group_leader); + pid = task_pid_nr(current->group_leader); + /* don't bother to store task struct for kernel threads, + they can't be killed anyway */ + if (current->group_leader->flags & PF_KTHREAD) { + put_task_struct(current->group_leader); + task = NULL; + } else { + task = current->group_leader; + } + task_unlock(current->group_leader); + + client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); + if (!client) + goto err_put_task_struct; + + client->dev = dev; + client->handles = RB_ROOT; + idr_init(&client->idr); + mutex_init(&client->lock); + client->task = task; + client->pid = pid; + client->name = kstrdup(name, GFP_KERNEL); + if (!client->name) + goto err_free_client; + + down_write(&dev->lock); + client->display_serial = ion_get_client_serial(&dev->clients, name); + client->display_name = kasprintf( + GFP_KERNEL, "%s-%d", name, client->display_serial); + if (!client->display_name) { + up_write(&dev->lock); + goto err_free_client_name; + } + p = &dev->clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (client < entry) + p = &(*p)->rb_left; + else if (client > entry) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->clients); + + client->debug_root = debugfs_create_file(client->display_name, 0664, + dev->clients_debug_root, + client, &debug_client_fops); + if (!client->debug_root) { + char buf[256], *path; + path = dentry_path(dev->clients_debug_root, buf, 256); + pr_err("Failed to create client debugfs at %s/%s\n", + path, client->display_name); + } + + up_write(&dev->lock); + + return client; + +err_free_client_name: + kfree(client->name); +err_free_client: + kfree(client); +err_put_task_struct: + if (task) + put_task_struct(current->group_leader); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(ion_client_create); + +void ion_client_destroy(struct ion_client *client) +{ + struct ion_device *dev = client->dev; + struct rb_node *n; + + pr_debug("%s: %d\n", __func__, __LINE__); + while ((n = rb_first(&client->handles))) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + ion_handle_destroy(&handle->ref); + } + + idr_destroy(&client->idr); + + down_write(&dev->lock); + if (client->task) + put_task_struct(client->task); + rb_erase(&client->node, &dev->clients); + debugfs_remove_recursive(client->debug_root); + up_write(&dev->lock); + + kfree(client->display_name); + kfree(client->name); + kfree(client); +} +EXPORT_SYMBOL(ion_client_destroy); + +struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle) +{ + struct ion_buffer *buffer; + struct sg_table *table; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_dma.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + table = buffer->sg_table; + 
mutex_unlock(&client->lock); + return table; +} +EXPORT_SYMBOL(ion_sg_table); + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, + struct device *dev, + enum dma_data_direction direction); + +static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_buf *dmabuf = attachment->dmabuf; + struct ion_buffer *buffer = dmabuf->priv; + + ion_buffer_sync_for_device(buffer, attachment->dev, direction); + return buffer->sg_table; +} + +static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ +} + +void ion_pages_sync_for_device(struct device *dev, struct page *page, + size_t size, enum dma_data_direction dir) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + /* + * This is not correct - sg_dma_address needs a dma_addr_t that is valid + * for the targeted device, but this works on the currently targeted + * hardware. + */ + sg_dma_address(&sg) = page_to_phys(page); + dma_sync_sg_for_device(dev, &sg, 1, dir); +} + +struct ion_vma_list { + struct list_head list; + struct vm_area_struct *vma; +}; + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, + struct device *dev, + enum dma_data_direction dir) +{ + struct ion_vma_list *vma_list; + int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + int i; + + pr_debug("%s: syncing for device %s\n", __func__, + dev ? dev_name(dev) : "null"); + + if (!ion_buffer_fault_user_mappings(buffer)) + return; + + mutex_lock(&buffer->lock); + for (i = 0; i < pages; i++) { + struct page *page = buffer->pages[i]; + + if (ion_buffer_page_is_dirty(page)) + ion_pages_sync_for_device(dev, ion_buffer_page(page), + PAGE_SIZE, dir); + + ion_buffer_page_clean(buffer->pages + i); + } + list_for_each_entry(vma_list, &buffer->vmas, list) { + struct vm_area_struct *vma = vma_list->vma; + + zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, + NULL); + } + mutex_unlock(&buffer->lock); +} + +static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct ion_buffer *buffer = vma->vm_private_data; + unsigned long pfn; + int ret; + + mutex_lock(&buffer->lock); + ion_buffer_page_dirty(buffer->pages + vmf->pgoff); + BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); + + pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); + ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + mutex_unlock(&buffer->lock); + if (ret) + return VM_FAULT_ERROR; + + return VM_FAULT_NOPAGE; +} + +static void ion_vm_open(struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = vma->vm_private_data; + struct ion_vma_list *vma_list; + + vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); + if (!vma_list) + return; + vma_list->vma = vma; + mutex_lock(&buffer->lock); + list_add(&vma_list->list, &buffer->vmas); + mutex_unlock(&buffer->lock); + pr_debug("%s: adding %p\n", __func__, vma); +} + +static void ion_vm_close(struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = vma->vm_private_data; + struct ion_vma_list *vma_list, *tmp; + + pr_debug("%s\n", __func__); + mutex_lock(&buffer->lock); + list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { + if (vma_list->vma != vma) + continue; + list_del(&vma_list->list); + kfree(vma_list); + pr_debug("%s: deleting %p\n", __func__, vma); + break; + } + mutex_unlock(&buffer->lock); +} + +static struct vm_operations_struct ion_vma_ops = { + .open = ion_vm_open, + 
.close = ion_vm_close, + .fault = ion_vm_fault, +}; + +static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = dmabuf->priv; + int ret = 0; + + if (!buffer->heap->ops->map_user) { + pr_err("%s: this heap does not define a method for mapping " + "to userspace\n", __func__); + return -EINVAL; + } + + if (ion_buffer_fault_user_mappings(buffer)) { + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | + VM_DONTDUMP; + vma->vm_private_data = buffer; + vma->vm_ops = &ion_vma_ops; + ion_vm_open(vma); + return 0; + } + + if (!(buffer->flags & ION_FLAG_CACHED)) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + mutex_lock(&buffer->lock); + /* now map it to userspace */ + ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); + mutex_unlock(&buffer->lock); + + if (ret) + pr_err("%s: failure mapping buffer to userspace\n", + __func__); + + return ret; +} + +static void ion_dma_buf_release(struct dma_buf *dmabuf) +{ + struct ion_buffer *buffer = dmabuf->priv; + + ion_buffer_put(buffer); +} + +static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) +{ + struct ion_buffer *buffer = dmabuf->priv; + + return buffer->vaddr + offset * PAGE_SIZE; +} + +static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, + void *ptr) +{ + return; +} + +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, + size_t len, + enum dma_data_direction direction) +{ + struct ion_buffer *buffer = dmabuf->priv; + void *vaddr; + + if (!buffer->heap->ops->map_kernel) { + pr_err("%s: map kernel is not implemented by this heap.\n", + __func__); + return -ENODEV; + } + + mutex_lock(&buffer->lock); + vaddr = ion_buffer_kmap_get(buffer); + mutex_unlock(&buffer->lock); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + return 0; +} + +static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, + size_t len, + enum dma_data_direction direction) +{ + struct ion_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + ion_buffer_kmap_put(buffer); + mutex_unlock(&buffer->lock); +} + +static struct dma_buf_ops dma_buf_ops = { + .map_dma_buf = ion_map_dma_buf, + .unmap_dma_buf = ion_unmap_dma_buf, + .mmap = ion_mmap, + .release = ion_dma_buf_release, + .begin_cpu_access = ion_dma_buf_begin_cpu_access, + .end_cpu_access = ion_dma_buf_end_cpu_access, + .kmap_atomic = ion_dma_buf_kmap, + .kunmap_atomic = ion_dma_buf_kunmap, + .kmap = ion_dma_buf_kmap, + .kunmap = ion_dma_buf_kunmap, +}; + +struct dma_buf *ion_share_dma_buf(struct ion_client *client, + struct ion_handle *handle) +{ + struct ion_buffer *buffer; + struct dma_buf *dmabuf; + bool valid_handle; + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to share.\n", __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + ion_buffer_get(buffer); + mutex_unlock(&client->lock); + + dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); + if (IS_ERR(dmabuf)) { + ion_buffer_put(buffer); + return dmabuf; + } + + return dmabuf; +} +EXPORT_SYMBOL(ion_share_dma_buf); + +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) +{ + struct dma_buf *dmabuf; + int fd; + + dmabuf = ion_share_dma_buf(client, handle); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, O_CLOEXEC); + if (fd < 0) + dma_buf_put(dmabuf); + + return fd; +} 
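+/*
+ * Typical export flow (illustrative only): allocate with ion_alloc(),
+ * then call ion_share_dma_buf_fd() and hand the fd to userspace.  The
+ * dma-buf takes its own reference on the underlying buffer, so the
+ * handle may be freed afterwards without invalidating the fd.
+ */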
+EXPORT_SYMBOL(ion_share_dma_buf_fd); + +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_buffer *buffer; + struct ion_handle *handle; + int ret; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return ERR_PTR(PTR_ERR(dmabuf)); + /* if this memory came from ion */ + + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not import dmabuf from another exporter\n", + __func__); + dma_buf_put(dmabuf); + return ERR_PTR(-EINVAL); + } + buffer = dmabuf->priv; + + mutex_lock(&client->lock); + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR(handle)) { + ion_handle_get(handle); + mutex_unlock(&client->lock); + goto end; + } + mutex_unlock(&client->lock); + + handle = ion_handle_create(client, buffer); + if (IS_ERR(handle)) + goto end; + + mutex_lock(&client->lock); + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { + ion_handle_put(handle); + handle = ERR_PTR(ret); + } + +end: + dma_buf_put(dmabuf); + return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf); + +static int ion_sync_for_device(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_buffer *buffer; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + /* if this memory came from ion */ + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not sync dmabuf from another exporter\n", + __func__); + dma_buf_put(dmabuf); + return -EINVAL; + } + buffer = dmabuf->priv; + + dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_BIDIRECTIONAL); + dma_buf_put(dmabuf); + return 0; +} + +/* fix up the cases where the ioctl direction bits are incorrect */ +static unsigned int ion_ioctl_dir(unsigned int cmd) +{ + switch (cmd) { + case ION_IOC_SYNC: + case ION_IOC_FREE: + case ION_IOC_CUSTOM: + return _IOC_WRITE; + default: + return _IOC_DIR(cmd); + } +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ion_client *client = filp->private_data; + struct ion_device *dev = client->dev; + struct ion_handle *cleanup_handle = NULL; + int ret = 0; + unsigned int dir; + + union { + struct ion_fd_data fd; + struct ion_allocation_data allocation; + struct ion_handle_data handle; + struct ion_custom_data custom; + } data; + + dir = ion_ioctl_dir(cmd); + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (dir & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_ALLOC: + { + struct ion_handle *handle; + + handle = ion_alloc(client, data.allocation.len, + data.allocation.align, + data.allocation.heap_id_mask, + data.allocation.flags); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + data.allocation.handle = handle->id; + + cleanup_handle = handle; + break; + } + case ION_IOC_FREE: + { + struct ion_handle *handle; + + handle = ion_handle_get_by_id(client, data.handle.handle); + if (IS_ERR(handle)) + return PTR_ERR(handle); + ion_free(client, handle); + ion_handle_put(handle); + break; + } + case ION_IOC_SHARE: + case ION_IOC_MAP: + { + struct ion_handle *handle; + + handle = ion_handle_get_by_id(client, data.handle.handle); + if (IS_ERR(handle)) + return PTR_ERR(handle); + data.fd.fd = ion_share_dma_buf_fd(client, handle); + ion_handle_put(handle); + if (data.fd.fd < 0) + ret = data.fd.fd; + break; + } + case ION_IOC_IMPORT: + { + struct ion_handle *handle; + + handle = 
ion_import_dma_buf(client, data.fd.fd); + if (IS_ERR(handle)) + ret = PTR_ERR(handle); + else + data.handle.handle = handle->id; + break; + } + case ION_IOC_SYNC: + { + ret = ion_sync_for_device(client, data.fd.fd); + break; + } + case ION_IOC_CUSTOM: + { + if (!dev->custom_ioctl) + return -ENOTTY; + ret = dev->custom_ioctl(client, data.custom.cmd, + data.custom.arg); + break; + } + default: + return -ENOTTY; + } + + if (dir & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { + if (cleanup_handle) + ion_free(client, cleanup_handle); + return -EFAULT; + } + } + return ret; +} + +static int ion_release(struct inode *inode, struct file *file) +{ + struct ion_client *client = file->private_data; + + pr_debug("%s: %d\n", __func__, __LINE__); + ion_client_destroy(client); + return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ + struct miscdevice *miscdev = file->private_data; + struct ion_device *dev = container_of(miscdev, struct ion_device, dev); + struct ion_client *client; + char debug_name[64]; + + pr_debug("%s: %d\n", __func__, __LINE__); + snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); + client = ion_client_create(dev, debug_name); + if (IS_ERR(client)) + return PTR_ERR(client); + file->private_data = client; + + return 0; +} + +static const struct file_operations ion_fops = { + .owner = THIS_MODULE, + .open = ion_open, + .release = ion_release, + .unlocked_ioctl = ion_ioctl, + .compat_ioctl = compat_ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, + unsigned int id) +{ + size_t size = 0; + struct rb_node *n; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, + struct ion_handle, + node); + if (handle->buffer->heap->id == id) + size += handle->buffer->size; + } + mutex_unlock(&client->lock); + return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ + struct ion_heap *heap = s->private; + struct ion_device *dev = heap->dev; + struct rb_node *n; + size_t total_size = 0; + size_t total_orphaned_size = 0; + + seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); + seq_printf(s, "----------------------------------------------------\n"); + + for (n = rb_first(&dev->clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + size_t size = ion_debug_heap_total(client, heap->id); + + if (!size) + continue; + if (client->task) { + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, client->task); + seq_printf(s, "%16s %16u %16zu\n", task_comm, + client->pid, size); + } else { + seq_printf(s, "%16s %16u %16zu\n", client->name, + client->pid, size); + } + } + seq_printf(s, "----------------------------------------------------\n"); + seq_printf(s, "orphaned allocations (info is from last known client):" + "\n"); + mutex_lock(&dev->buffer_lock); + for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { + struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, + node); + if (buffer->heap->id != heap->id) + continue; + total_size += buffer->size; + if (!buffer->handle_count) { + seq_printf(s, "%16s %16u %16zu %d %d\n", + buffer->task_comm, buffer->pid, + buffer->size, buffer->kmap_cnt, + atomic_read(&buffer->ref.refcount)); + total_orphaned_size += buffer->size; + } + } + mutex_unlock(&dev->buffer_lock); + seq_printf(s, "----------------------------------------------------\n"); + seq_printf(s, "%16s %16zu\n", "total orphaned", + total_orphaned_size); + seq_printf(s, "%16s %16zu\n", "total ", total_size); + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + seq_printf(s, "%16s %16zu\n", "deferred free", + heap->free_list_size); + seq_printf(s, "----------------------------------------------------\n"); + + if (heap->debug_show) + heap->debug_show(heap, s, unused); + + return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct file_operations debug_heap_fops = { + .open = ion_debug_heap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef DEBUG_HEAP_SHRINKER +static int debug_shrink_set(void *data, u64 val) +{ + struct ion_heap *heap = data; + struct shrink_control sc; + int objs; + + sc.gfp_mask = -1; + sc.nr_to_scan = 0; + + if (!val) + return 0; + + objs = heap->shrinker.shrink(&heap->shrinker, &sc); + sc.nr_to_scan = objs; + + heap->shrinker.shrink(&heap->shrinker, &sc); + return 0; +} + +static int debug_shrink_get(void *data, u64 *val) +{ + struct ion_heap *heap = data; + struct shrink_control sc; + int objs; + + sc.gfp_mask = -1; + sc.nr_to_scan = 0; + + objs = heap->shrinker.shrink(&heap->shrinker, &sc); + *val = objs; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, + debug_shrink_set, "%llu\n"); +#endif + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ + struct dentry *debug_file; + + if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || + !heap->ops->unmap_dma) + pr_err("%s: can not add heap with invalid ops struct.\n", + __func__); + + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + ion_heap_init_deferred_free(heap); + + if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) + ion_heap_init_shrinker(heap); + + heap->dev = dev; + down_write(&dev->lock); + /* use negative heap->id to reverse the priority -- when traversing + the list later attempt higher id numbers first */ + plist_node_init(&heap->node, -heap->id); + plist_add(&heap->node, &dev->heaps); + debug_file = debugfs_create_file(heap->name, 0664, + dev->heaps_debug_root, heap, + &debug_heap_fops); + + if (!debug_file) { + char buf[256], *path; + + path = dentry_path(dev->heaps_debug_root, buf, 256); + pr_err("Failed to create heap debugfs at %s/%s\n", + path, heap->name); + } + +#ifdef DEBUG_HEAP_SHRINKER + if (heap->shrinker.shrink) { + char debug_name[64]; + + snprintf(debug_name, 64, "%s_shrink", heap->name); + debug_file = debugfs_create_file( + debug_name, 0644, dev->heaps_debug_root, heap, + &debug_shrink_fops); + if (!debug_file) { + char buf[256], *path; + + path = dentry_path(dev->heaps_debug_root, buf, 256); + pr_err("Failed to create heap shrinker debugfs at %s/%s\n", + path, debug_name); + } + } +#endif + up_write(&dev->lock); +} + +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)) +{ + struct ion_device *idev; + int ret; + + idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); + if (!idev) + return ERR_PTR(-ENOMEM); + + idev->dev.minor = MISC_DYNAMIC_MINOR; + idev->dev.name = "ion"; + idev->dev.fops = &ion_fops; + idev->dev.parent = NULL; + ret = misc_register(&idev->dev); + if (ret) { + pr_err("ion: failed to register misc device.\n"); + return ERR_PTR(ret); + } + + idev->debug_root = debugfs_create_dir("ion", NULL); + if (!idev->debug_root) { + pr_err("ion: failed to create debugfs root 
directory.\n"); + goto debugfs_done; + } + idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); + if (!idev->heaps_debug_root) { + pr_err("ion: failed to create debugfs heaps directory.\n"); + goto debugfs_done; + } + idev->clients_debug_root = debugfs_create_dir("clients", + idev->debug_root); + if (!idev->clients_debug_root) + pr_err("ion: failed to create debugfs clients directory.\n"); + +debugfs_done: + + idev->custom_ioctl = custom_ioctl; + idev->buffers = RB_ROOT; + mutex_init(&idev->buffer_lock); + init_rwsem(&idev->lock); + plist_head_init(&idev->heaps); + idev->clients = RB_ROOT; + return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ + misc_deregister(&dev->dev); + debugfs_remove_recursive(dev->debug_root); + /* XXX need to free the heaps and clients ? */ + kfree(dev); +} + +void __init ion_reserve(struct ion_platform_data *data) +{ + int i; + + for (i = 0; i < data->nr; i++) { + if (data->heaps[i].size == 0) + continue; + + if (data->heaps[i].base == 0) { + phys_addr_t paddr; + + paddr = memblock_alloc_base(data->heaps[i].size, + data->heaps[i].align, + MEMBLOCK_ALLOC_ANYWHERE); + if (!paddr) { + pr_err("%s: error allocating memblock for " + "heap %d\n", + __func__, i); + continue; + } + data->heaps[i].base = paddr; + } else { + int ret = memblock_reserve(data->heaps[i].base, + data->heaps[i].size); + if (ret) + pr_err("memblock reserve of %zx@%lx failed\n", + data->heaps[i].size, + data->heaps[i].base); + } + pr_info("%s: %s reserved base %lx size %zu\n", __func__, + data->heaps[i].name, + data->heaps[i].base, + data->heaps[i].size); + } +} diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h new file mode 100644 index 000000000000..dcd2a0cdb192 --- /dev/null +++ b/drivers/staging/android/ion/ion.h @@ -0,0 +1,204 @@ +/* + * drivers/staging/android/ion/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ION_H +#define _LINUX_ION_H + +#include <linux/types.h> + +#include "../uapi/ion.h" + +struct ion_handle; +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* This should be removed some day when phys_addr_t's are fully + plumbed in the kernel, and all instances of ion_phys_addr_t should + be converted to phys_addr_t. For the time being many kernel interfaces + do not accept phys_addr_t's that would have to */ +#define ion_phys_addr_t unsigned long + +/** + * struct ion_platform_heap - defines a heap in the given platform + * @type: type of the heap from ion_heap_type enum + * @id: unique identifier for heap. When allocating higher numbers + * will be allocated from first. At allocation these are passed + * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS. + * @name: used for debug purposes + * @base: base address of heap in physical memory if applicable + * @size: size of the heap in bytes if applicable + * @align: required alignment in physical memory if applicable + * @priv: private info passed from the board file + * + * Provided by the board file. 
+ */ +struct ion_platform_heap { + enum ion_heap_type type; + unsigned int id; + const char *name; + ion_phys_addr_t base; + size_t size; + ion_phys_addr_t align; + void *priv; +}; + +/** + * struct ion_platform_data - array of platform heaps passed from board file + * @nr: number of structures in the array + * @heaps: array of platform_heap structures + * + * Provided by the board file in the form of platform data to a platform device. + */ +struct ion_platform_data { + int nr; + struct ion_platform_heap *heaps; +}; + +/** + * ion_reserve() - reserve memory for ion heaps if applicable + * @data: platform data specifying starting physical address and + * size + * + * Calls memblock reserve to set aside memory for heaps that are + * located at specific memory addresses or of specific sizes not + * managed by the kernel. + */ +void ion_reserve(struct ion_platform_data *data); + +/** + * ion_client_create() - allocates a client and returns it + * @dev: the global ion device + * @name: used for debugging + */ +struct ion_client *ion_client_create(struct ion_device *dev, + const char *name); + +/** + * ion_client_destroy() - frees a client and all its handles + * @client: the client + * + * Free the provided client and all its resources including + * any handles it is holding. + */ +void ion_client_destroy(struct ion_client *client); + +/** + * ion_alloc - allocate ion memory + * @client: the client + * @len: size of the allocation + * @align: requested allocation alignment, lots of hardware blocks + * have alignment requirements of some kind + * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set + * heaps will be tried in order from highest to lowest + * id + * @flags: heap flags, the low 16 bits are consumed by ion, the + * high 16 bits are passed on to the respective heap and + * can be heap-specific + * + * Allocate memory in one of the heaps provided in the heap mask and return + * an opaque handle to it. + */ +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_id_mask, + unsigned int flags); + +/** + * ion_free - free a handle + * @client: the client + * @handle: the handle to free + * + * Free the provided handle. + */ +void ion_free(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_phys - returns the physical address and len of a handle + * @client: the client + * @handle: the handle + * @addr: a pointer to put the address in + * @len: a pointer to put the length in + * + * This function queries the heap for a particular handle to get the + * handle's physical address. Its output is only correct if + * a heap returns physically contiguous memory -- in other cases + * this API should not be implemented -- ion_sg_table should be used + * instead. Returns -EINVAL if the handle is invalid. This has + * no implications on the reference counting of the handle -- + * the returned value may not be valid if the caller is not + * holding a reference. + */ +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len); + +/** + * ion_sg_table - return an sg_table describing a handle + * @client: the client + * @handle: the handle + * + * This function returns the sg_table describing + * a particular ion handle.
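+ * The returned table is owned by the buffer behind the handle; the
+ * caller must not free it and should keep the handle alive for as
+ * long as the table is in use.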
+ */ +struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_map_kernel - create mapping for the given handle + * @client: the client + * @handle: handle to map + * + * Map the given handle into the kernel and return a kernel address that + * can be used to access this address. + */ +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_unmap_kernel() - destroy a kernel mapping for a handle + * @client: the client + * @handle: handle to unmap + */ +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_share_dma_buf() - share buffer as dma-buf + * @client: the client + * @handle: the handle + */ +struct dma_buf *ion_share_dma_buf(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd + * @client: the client + * @handle: the handle + */ +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle + * @client: the client + * @fd: the dma-buf fd + * + * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf, + * import that fd and return a handle representing it. If a dma-buf from + * another exporter is passed in, this function will return ERR_PTR(-EINVAL). + */ +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); + +#endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c new file mode 100644 index 000000000000..5165de2ce343 --- /dev/null +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -0,0 +1,194 @@ +/* + * drivers/gpu/ion/ion_carveout_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ +#include <linux/spinlock.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_carveout_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + + if (!offset) + return ION_CARVEOUT_ALLOCATE_FAIL; + + return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) + return; + gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + *addr = paddr; + *len = buffer->size; + return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct sg_table *table; + ion_phys_addr_t paddr; + int ret; + + if (align > PAGE_SIZE) + return -EINVAL; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto err_free; + + paddr = ion_carveout_allocate(heap, size, align); + if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { + ret = -ENOMEM; + goto err_free_table; + } + + sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); + buffer->priv_virt = table; + + return 0; + +err_free_table: + sg_free_table(table); +err_free: + kfree(table); + return ret; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + ion_carveout_free(heap, paddr, buffer->size); + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_carveout_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static struct ion_heap_ops carveout_heap_ops = { + .allocate = ion_carveout_heap_allocate, + .free = ion_carveout_heap_free, + .phys = ion_carveout_heap_phys, + .map_dma = ion_carveout_heap_map_dma, + .unmap_dma = ion_carveout_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_carveout_heap *carveout_heap; + int ret; + + struct page *page; + size_t size; + + page = pfn_to_page(PFN_DOWN(heap_data->base)); + size = heap_data->size; + + 
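/*
+	 * Flush and zero the entire carveout up front.  The zeroing pass
+	 * below runs through a write-combined mapping, so the cache is
+	 * cleaned first to keep stale dirty lines from being written back
+	 * over the freshly zeroed region.
+	 */
+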
ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); + if (ret) + return ERR_PTR(ret); + + carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + if (!carveout_heap) + return ERR_PTR(-ENOMEM); + + carveout_heap->pool = gen_pool_create(12, -1); + if (!carveout_heap->pool) { + kfree(carveout_heap); + return ERR_PTR(-ENOMEM); + } + carveout_heap->base = heap_data->base; + gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, + -1); + carveout_heap->heap.ops = &carveout_heap_ops; + carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; + carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + + return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + carveout_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c new file mode 100644 index 000000000000..ca20d6279602 --- /dev/null +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -0,0 +1,195 @@ +/* + * drivers/gpu/ion/ion_chunk_heap.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_chunk_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; + unsigned long chunk_size; + unsigned long size; + unsigned long allocated; +}; + +static int ion_chunk_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table; + struct scatterlist *sg; + int ret, i; + unsigned long num_chunks; + unsigned long allocated_size; + + if (align > chunk_heap->chunk_size) + return -EINVAL; + + allocated_size = ALIGN(size, chunk_heap->chunk_size); + num_chunks = allocated_size / chunk_heap->chunk_size; + + if (allocated_size > chunk_heap->size - chunk_heap->allocated) + return -ENOMEM; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); + if (ret) { + kfree(table); + return ret; + } + + sg = table->sgl; + for (i = 0; i < num_chunks; i++) { + unsigned long paddr = gen_pool_alloc(chunk_heap->pool, + chunk_heap->chunk_size); + if (!paddr) + goto err; + sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), + chunk_heap->chunk_size, 0); + sg = sg_next(sg); + } + + buffer->priv_virt = table; + chunk_heap->allocated += allocated_size; + return 0; +err: + sg = table->sgl; + for (i -= 1; i >= 0; i--) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + sg = sg_next(sg); + } + sg_free_table(table); + kfree(table); + return -ENOMEM; +} + +static void ion_chunk_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table = buffer->priv_virt; + struct scatterlist *sg; + int i; + unsigned long allocated_size; + + allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + for_each_sg(table->sgl, sg, table->nents, i) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + } + chunk_heap->allocated -= allocated_size; + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_chunk_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static struct ion_heap_ops chunk_heap_ops = { + .allocate = ion_chunk_heap_allocate, + .free = ion_chunk_heap_free, + .map_dma = ion_chunk_heap_map_dma, + .unmap_dma = ion_chunk_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_chunk_heap *chunk_heap; + int ret; + struct page *page; + size_t size; + + page = pfn_to_page(PFN_DOWN(heap_data->base)); + size = heap_data->size; + + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); + if (ret) + return 
ERR_PTR(ret); + + chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); + if (!chunk_heap) + return ERR_PTR(-ENOMEM); + + chunk_heap->chunk_size = (unsigned long)heap_data->priv; + chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + + PAGE_SHIFT, -1); + if (!chunk_heap->pool) { + ret = -ENOMEM; + goto error_gen_pool_create; + } + chunk_heap->base = heap_data->base; + chunk_heap->size = heap_data->size; + chunk_heap->allocated = 0; + + gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); + chunk_heap->heap.ops = &chunk_heap_ops; + chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; + chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base, + heap_data->size, heap_data->align); + + return &chunk_heap->heap; + +error_gen_pool_create: + kfree(chunk_heap); + return ERR_PTR(ret); +} + +void ion_chunk_heap_destroy(struct ion_heap *heap) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + + gen_pool_destroy(chunk_heap->pool); + kfree(chunk_heap); + chunk_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c new file mode 100644 index 000000000000..4418bda76474 --- /dev/null +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -0,0 +1,218 @@ +/* + * drivers/gpu/ion/ion_cma_heap.c + * + * Copyright (C) Linaro 2012 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/dma-mapping.h> + +#include "ion.h" +#include "ion_priv.h" + +#define ION_CMA_ALLOCATE_FAILED -1 + +struct ion_cma_heap { + struct ion_heap heap; + struct device *dev; +}; + +#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) + +struct ion_cma_buffer_info { + void *cpu_addr; + dma_addr_t handle; + struct sg_table *table; +}; + +/* + * Create scatter-list for the already allocated DMA buffer. + * This function could be replaced by dma_common_get_sgtable + * as soon as it is available.
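+ * (dma_common_get_sgtable builds the same single-entry table from
+ * virt_to_page(cpu_addr), so this helper can be dropped once that
+ * interface is available.)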
+ */ +static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size) +{ + struct page *page = virt_to_page(cpu_addr); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + return 0; +} + +/* ION CMA heap operations functions */ +static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, + unsigned long len, unsigned long align, + unsigned long flags) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info; + + dev_dbg(dev, "Request buffer allocation len %ld\n", len); + + if (buffer->flags & ION_FLAG_CACHED) + return -EINVAL; + + if (align > PAGE_SIZE) + return -EINVAL; + + info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); + if (!info) { + dev_err(dev, "Can't allocate buffer info\n"); + return ION_CMA_ALLOCATE_FAILED; + } + + info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), + GFP_HIGHUSER | __GFP_ZERO); + + if (!info->cpu_addr) { + dev_err(dev, "Fail to allocate buffer\n"); + goto err; + } + + info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!info->table) { + dev_err(dev, "Fail to allocate sg table\n"); + goto free_mem; + } + + if (ion_cma_get_sgtable + (dev, info->table, info->cpu_addr, info->handle, len)) + goto free_table; + /* keep this for memory release */ + buffer->priv_virt = info; + dev_dbg(dev, "Allocate buffer %p\n", buffer); + return 0; + +free_table: + kfree(info->table); +free_mem: + dma_free_coherent(dev, len, info->cpu_addr, info->handle); +err: + kfree(info); + return ION_CMA_ALLOCATE_FAILED; +} + +static void ion_cma_free(struct ion_buffer *buffer) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(dev, "Release buffer %p\n", buffer); + /* release memory */ + dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); + /* release sg table */ + sg_free_table(info->table); + kfree(info->table); + kfree(info); +} + +/* return physical address in addr */ +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer, + &info->handle); + + *addr = info->handle; + *len = buffer->size; + + return 0; +} + +static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cma_buffer_info *info = buffer->priv_virt; + + return info->table; +} + +static void ion_cma_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, + buffer->size); +} + +static void *ion_cma_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cma_buffer_info *info = buffer->priv_virt; + /* kernel memory mapping has been done at allocation time */ + return info->cpu_addr; +} + +static void 
ion_cma_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops ion_cma_ops = { + .allocate = ion_cma_allocate, + .free = ion_cma_free, + .map_dma = ion_cma_heap_map_dma, + .unmap_dma = ion_cma_heap_unmap_dma, + .phys = ion_cma_phys, + .map_user = ion_cma_mmap, + .map_kernel = ion_cma_map_kernel, + .unmap_kernel = ion_cma_unmap_kernel, +}; + +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) +{ + struct ion_cma_heap *cma_heap; + + cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); + + if (!cma_heap) + return ERR_PTR(-ENOMEM); + + cma_heap->heap.ops = &ion_cma_ops; + /* get device from private heaps data, later it will be + * used to make the link with reserved CMA memory */ + cma_heap->dev = data->priv; + cma_heap->heap.type = ION_HEAP_TYPE_DMA; + return &cma_heap->heap; +} + +void ion_cma_heap_destroy(struct ion_heap *heap) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(heap); + + kfree(cma_heap); +} diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c new file mode 100644 index 000000000000..55b2002753f2 --- /dev/null +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -0,0 +1,158 @@ +/* + * drivers/gpu/ion/ion_dummy_driver.c + * + * Copyright (C) 2013 Linaro, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/sizes.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_device *idev; +struct ion_heap **heaps; + +void *carveout_ptr; +void *chunk_ptr; + +struct ion_platform_heap dummy_heaps[] = { + { + .id = ION_HEAP_TYPE_SYSTEM, + .type = ION_HEAP_TYPE_SYSTEM, + .name = "system", + }, + { + .id = ION_HEAP_TYPE_SYSTEM_CONTIG, + .type = ION_HEAP_TYPE_SYSTEM_CONTIG, + .name = "system contig", + }, + { + .id = ION_HEAP_TYPE_CARVEOUT, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = "carveout", + .size = SZ_4M, + }, + { + .id = ION_HEAP_TYPE_CHUNK, + .type = ION_HEAP_TYPE_CHUNK, + .name = "chunk", + .size = SZ_4M, + .align = SZ_16K, + .priv = (void *)(SZ_16K), + }, +}; + +struct ion_platform_data dummy_ion_pdata = { + .nr = 4, + .heaps = dummy_heaps, +}; + +static int __init ion_dummy_init(void) +{ + int i, err; + + idev = ion_device_create(NULL); + heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, + GFP_KERNEL); + if (!heaps) + return -ENOMEM; + + + /* Allocate a dummy carveout heap */ + carveout_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size, + GFP_KERNEL); + if (carveout_ptr) + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = + virt_to_phys(carveout_ptr); + else + pr_err("ion_dummy: Could not allocate carveout\n"); + + /* Allocate a dummy chunk heap */ + chunk_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CHUNK].size, + GFP_KERNEL); + if (chunk_ptr) + dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr); + else + pr_err("ion_dummy: Could not allocate chunk\n"); + + for (i = 0; i < dummy_ion_pdata.nr; i++) { + struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i]; + + if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && + !heap_data->base) + continue; + + if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) + continue; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + return 0; +err: + for (i = 0; i < dummy_ion_pdata.nr; i++) { + if (heaps[i]) + ion_heap_destroy(heaps[i]); + } + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + return err; +} + +static void __exit ion_dummy_exit(void) +{ + int i; + + ion_device_destroy(idev); + + for (i = 0; i < dummy_ion_pdata.nr; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + + return; +} + +module_init(ion_dummy_init); +module_exit(ion_dummy_exit); + diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c new file mode 100644 index 000000000000..e1bbd8b1b4f4 --- /dev/null +++ b/drivers/staging/android/ion/ion_heap.c @@ -0,0 +1,371 @@ +/* + * drivers/gpu/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/freezer.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/rtmutex.h> +#include <linux/sched.h> +#include <linux/scatterlist.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +void *ion_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sg; + int i, j; + void *vaddr; + pgprot_t pgprot; + struct sg_table *table = buffer->sg_table; + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + + if (!pages) + return NULL; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for_each_sg(table->sgl, sg, table->nents, i) { + int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; + struct page *page = sg_page(sg); + + BUG_ON(i >= npages); + for (j = 0; j < npages_this_entry; j++) + *(tmp++) = page++; + } + vaddr = vmap(pages, npages, VM_MAP, pgprot); + vfree(pages); + + if (vaddr == NULL) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +void ion_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + vunmap(buffer->vaddr); +} + +int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct sg_table *table = buffer->sg_table; + unsigned long addr = vma->vm_start; + unsigned long offset = vma->vm_pgoff * PAGE_SIZE; + struct scatterlist *sg; + int i; + int ret; + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + unsigned long remainder = vma->vm_end - addr; + unsigned long len = sg->length; + + if (offset >= sg->length) { + offset -= sg->length; + continue; + } else if (offset) { + page += offset / PAGE_SIZE; + len = sg->length - offset; + offset = 0; + } + len = min(len, remainder); + ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, + vma->vm_page_prot); + if (ret) + return ret; + addr += len; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) +{ + void *addr = vm_map_ram(pages, num, -1, pgprot); + + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); + vm_unmap_ram(addr, num); + + return 0; +} + +static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, + pgprot_t pgprot) +{ + int p = 0; + int ret = 0; + struct sg_page_iter piter; + struct page *pages[32]; + + for_each_sg_page(sgl, &piter, nents, 0) { + pages[p++] = sg_page_iter_page(&piter); + if (p == ARRAY_SIZE(pages)) { + ret = ion_heap_clear_pages(pages, p, pgprot); + if (ret) + return ret; + p = 0; + } + } + if (p) + ret = ion_heap_clear_pages(pages, p, pgprot); + + return ret; +} + +int ion_heap_buffer_zero(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->sg_table; + pgprot_t pgprot; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); +} + +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + return 
ion_heap_sglist_zero(&sg, 1, pgprot); +} + +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) +{ + spin_lock(&heap->free_lock); + list_add(&buffer->list, &heap->free_list); + heap->free_list_size += buffer->size; + spin_unlock(&heap->free_lock); + wake_up(&heap->waitqueue); +} + +size_t ion_heap_freelist_size(struct ion_heap *heap) +{ + size_t size; + + spin_lock(&heap->free_lock); + size = heap->free_list_size; + spin_unlock(&heap->free_lock); + + return size; +} + +static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size, + bool skip_pools) +{ + struct ion_buffer *buffer; + size_t total_drained = 0; + + if (ion_heap_freelist_size(heap) == 0) + return 0; + + spin_lock(&heap->free_lock); + if (size == 0) + size = heap->free_list_size; + + while (!list_empty(&heap->free_list)) { + if (total_drained >= size) + break; + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + if (skip_pools) + buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; + total_drained += buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + spin_lock(&heap->free_lock); + } + spin_unlock(&heap->free_lock); + + return total_drained; +} + +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) +{ + return _ion_heap_freelist_drain(heap, size, false); +} + +size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) +{ + return _ion_heap_freelist_drain(heap, size, true); +} + +static int ion_heap_deferred_free(void *data) +{ + struct ion_heap *heap = data; + + while (true) { + struct ion_buffer *buffer; + + wait_event_freezable(heap->waitqueue, + ion_heap_freelist_size(heap) > 0); + + spin_lock(&heap->free_lock); + if (list_empty(&heap->free_list)) { + spin_unlock(&heap->free_lock); + continue; + } + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + } + + return 0; +} + +int ion_heap_init_deferred_free(struct ion_heap *heap) +{ + struct sched_param param = { .sched_priority = 0 }; + + INIT_LIST_HEAD(&heap->free_list); + heap->free_list_size = 0; + spin_lock_init(&heap->free_lock); + init_waitqueue_head(&heap->waitqueue); + heap->task = kthread_run(ion_heap_deferred_free, heap, + "%s", heap->name); + if (IS_ERR(heap->task)) { + pr_err("%s: creating thread for deferred free failed\n", + __func__); + return PTR_RET(heap->task); + } + sched_setscheduler(heap->task, SCHED_IDLE, &param); + return 0; +} + +static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct ion_heap *heap = container_of(shrinker, struct ion_heap, + shrinker); + int total = 0; + int freed = 0; + int to_scan = sc->nr_to_scan; + + if (to_scan == 0) + goto out; + + /* + * shrink the free list first, no point in zeroing the memory if we're + * just going to reclaim it. Also, skip any possible page pooling.
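+	 * Whatever is left of nr_to_scan after the free list has been
+	 * drained is passed on to the heap's own shrink op below.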
+ */ + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) / + PAGE_SIZE; + + to_scan -= freed; + if (to_scan < 0) + to_scan = 0; + +out: + total = ion_heap_freelist_size(heap) / PAGE_SIZE; + if (heap->ops->shrink) + total += heap->ops->shrink(heap, sc->gfp_mask, to_scan); + return total; +} + +void ion_heap_init_shrinker(struct ion_heap *heap) +{ + heap->shrinker.shrink = ion_heap_shrink; + heap->shrinker.seeks = DEFAULT_SEEKS; + heap->shrinker.batch = 0; + register_shrinker(&heap->shrinker); +} + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CHUNK: + heap = ion_chunk_heap_create(heap_data); + break; + case ION_HEAP_TYPE_DMA: + heap = ion_cma_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %zu\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CHUNK: + ion_chunk_heap_destroy(heap); + break; + case ION_HEAP_TYPE_DMA: + ion_cma_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c new file mode 100644 index 000000000000..0e20e62ec4bc --- /dev/null +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -0,0 +1,190 @@ +/* + * drivers/gpu/ion/ion_mem_pool.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> +#include "ion_priv.h" + +struct ion_page_pool_item { + struct page *page; + struct list_head list; +}; + +static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) +{ + struct page *page = alloc_pages(pool->gfp_mask, pool->order); + + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, + DMA_BIDIRECTIONAL); + return page; +} + +static void ion_page_pool_free_pages(struct ion_page_pool *pool, + struct page *page) +{ + __free_pages(page, pool->order); +} + +static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) +{ + struct ion_page_pool_item *item; + + item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL); + if (!item) + return -ENOMEM; + + mutex_lock(&pool->mutex); + item->page = page; + if (PageHighMem(page)) { + list_add_tail(&item->list, &pool->high_items); + pool->high_count++; + } else { + list_add_tail(&item->list, &pool->low_items); + pool->low_count++; + } + mutex_unlock(&pool->mutex); + return 0; +} + +static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) +{ + struct ion_page_pool_item *item; + struct page *page; + + if (high) { + BUG_ON(!pool->high_count); + item = list_first_entry(&pool->high_items, + struct ion_page_pool_item, list); + pool->high_count--; + } else { + BUG_ON(!pool->low_count); + item = list_first_entry(&pool->low_items, + struct ion_page_pool_item, list); + pool->low_count--; + } + + list_del(&item->list); + page = item->page; + kfree(item); + return page; +} + +void *ion_page_pool_alloc(struct ion_page_pool *pool) +{ + struct page *page = NULL; + + BUG_ON(!pool); + + mutex_lock(&pool->mutex); + if (pool->high_count) + page = ion_page_pool_remove(pool, true); + else if (pool->low_count) + page = ion_page_pool_remove(pool, false); + mutex_unlock(&pool->mutex); + + if (!page) + page = ion_page_pool_alloc_pages(pool); + + return page; +} + +void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) +{ + int ret; + + ret = ion_page_pool_add(pool, page); + if (ret) + ion_page_pool_free_pages(pool, page); +} + +static int ion_page_pool_total(struct ion_page_pool *pool, bool high) +{ + int total = 0; + + total += high ? 
(pool->high_count + pool->low_count) * + (1 << pool->order) : + pool->low_count * (1 << pool->order); + return total; +} + +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan) +{ + int i; + bool high; + + high = !!(gfp_mask & __GFP_HIGHMEM); + + for (i = 0; i < nr_to_scan; i++) { + struct page *page; + + mutex_lock(&pool->mutex); + if (pool->low_count) { + page = ion_page_pool_remove(pool, false); + } else if (high && pool->high_count) { + page = ion_page_pool_remove(pool, true); + } else { + mutex_unlock(&pool->mutex); + break; + } + mutex_unlock(&pool->mutex); + ion_page_pool_free_pages(pool, page); + } + + return ion_page_pool_total(pool, high); +} + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) +{ + struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), + GFP_KERNEL); + if (!pool) + return NULL; + pool->high_count = 0; + pool->low_count = 0; + INIT_LIST_HEAD(&pool->low_items); + INIT_LIST_HEAD(&pool->high_items); + pool->gfp_mask = gfp_mask; + pool->order = order; + mutex_init(&pool->mutex); + plist_node_init(&pool->list, order); + + return pool; +} + +void ion_page_pool_destroy(struct ion_page_pool *pool) +{ + kfree(pool); +} + +static int __init ion_page_pool_init(void) +{ + return 0; +} + +static void __exit ion_page_pool_exit(void) +{ +} + +module_init(ion_page_pool_init); +module_exit(ion_page_pool_exit); diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 000000000000..8871f35f384b --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,406 @@ +/* + * drivers/gpu/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 000000000000..8871f35f384b --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,406 @@ +/* + * drivers/gpu/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/kref.h> +#include <linux/mm_types.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/shrinker.h> +#include <linux/types.h> + +#include "ion.h" + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref: reference count + * @node: node in the ion_device buffers tree + * @dev: back pointer to the ion_device + * @heap: back pointer to the heap the buffer came from + * @flags: buffer specific flags + * @private_flags: internal buffer specific flags + * @size: size of the buffer + * @priv_virt: private data to the buffer representable as + * a void * + * @priv_phys: private data to the buffer representable as + * an ion_phys_addr_t (and someday a phys_addr_t) + * @lock: protects the buffer's cnt fields + * @kmap_cnt: number of times the buffer is mapped to the kernel + * @vaddr: the kernel mapping if kmap_cnt is not zero + * @dmap_cnt: number of times the buffer is mapped for dma + * @sg_table: the sg table for the buffer if dmap_cnt is not zero + * @pages: flat array of pages in the buffer -- used by fault + * handler and only valid for buffers that are faulted in + * @vmas: list of vma's mapping this buffer + * @handle_count: count of handles referencing this buffer + * @task_comm: taskcomm of last client to reference this buffer in a + * handle, used for debugging + * @pid: pid of last client to reference this buffer in a + * handle, used for debugging + */ +struct ion_buffer { + struct kref ref; + union { + struct rb_node node; + struct list_head list; + }; + struct ion_device *dev; + struct ion_heap *heap; + unsigned long flags; + unsigned long private_flags; + size_t size; + union { + void *priv_virt; + ion_phys_addr_t priv_phys; + }; + struct mutex lock; + int kmap_cnt; + void *vaddr; + int dmap_cnt; + struct sg_table *sg_table; + struct page **pages; + struct list_head vmas; + /* used to track orphaned buffers */ + int handle_count; + char task_comm[TASK_COMM_LEN]; + pid_t pid; +}; +void ion_buffer_destroy(struct ion_buffer *buffer); + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate: allocate memory + * @free: free memory + * @phys: get physical address of a buffer (only defined on + * physically contiguous heaps) + * @map_dma: map the memory for dma to a scatterlist + * @unmap_dma: unmap the memory for dma + * @map_kernel: map memory to the kernel + * @unmap_kernel: unmap memory from the kernel + * @map_user: map memory to userspace + * @shrink: reclaim memory cached by the heap, called under + * memory pressure (optional) + * + * allocate, phys, and map_user return 0 on success, -errno on error. + * map_dma and map_kernel return pointer on success, ERR_PTR on + * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in + * the buffer's private_flags when called from a shrinker. In that + * case, the pages being free'd must be truly free'd back to the + * system, not put in a page pool or otherwise cached.
+ */ +struct ion_heap_ops { + int (*allocate) (struct ion_heap *heap, + struct ion_buffer *buffer, unsigned long len, + unsigned long align, unsigned long flags); + void (*free) (struct ion_buffer *buffer); + int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len); + struct sg_table *(*map_dma) (struct ion_heap *heap, + struct ion_buffer *buffer); + void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer); + void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); + void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); + int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma); + int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); +}; + +/** + * heap flags - flags between the heaps and core ion code + */ +#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) + +/** + * private flags - flags internal to ion + */ +/* + * Buffer is being freed from a shrinker function. Skip any possible + * heap-specific caching mechanism (e.g. page pools). Guarantees that + * any buffer storage that came from the system allocator will be + * returned to the system allocator. + */ +#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0) + +/** + * struct ion_heap - represents a heap in the system + * @node: plist node to put the heap on the device's priority + * list of heaps + * @dev: back pointer to the ion_device + * @type: type of heap + * @ops: ops struct as above + * @flags: flags + * @id: id of heap, also indicates priority of this heap when + * allocating. These are specified by platform data and + * MUST be unique + * @name: used for debugging + * @shrinker: a shrinker for the heap + * @free_list: free list head if deferred free is used + * @free_list_size: size of the deferred free list in bytes + * @free_lock: protects the free list + * @waitqueue: queue to wait on from deferred free thread + * @task: task struct of deferred free thread + * @debug_show: called when heap debug file is read to add any + * heap specific debug info to output + * + * Represents a pool of memory from which buffers can be made. In some + * systems the only heap is regular system memory allocated via vmalloc. + * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. + */ +struct ion_heap { + struct plist_node node; + struct ion_device *dev; + enum ion_heap_type type; + struct ion_heap_ops *ops; + unsigned long flags; + unsigned int id; + const char *name; + struct shrinker shrinker; + struct list_head free_list; + size_t free_list_size; + spinlock_t free_lock; + wait_queue_head_t waitqueue; + struct task_struct *task; + + int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); +}; + +/** + * ion_buffer_cached - this ion buffer is cached + * @buffer: buffer + * + * indicates whether this ion buffer is cached + */ +bool ion_buffer_cached(struct ion_buffer *buffer); + +/** + * ion_buffer_fault_user_mappings - fault in user mappings of this buffer + * @buffer: buffer + * + * indicates whether userspace mappings of this buffer will be faulted + * in; this can affect how buffers are allocated from the heap. + */ +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
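[Editorial aside, not part of the patch] To make the ops contract concrete, here is a hedged sketch of the minimum a simple heap has to provide. The three mapping ops can be backed by the generic sg_table-based helpers declared further down in this header, so only allocation, freeing, and the dma table are heap-specific; the demo_* functions are hypothetical stand-ins:

static int demo_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			 unsigned long len, unsigned long align,
			 unsigned long flags);
static void demo_free(struct ion_buffer *buffer);
static struct sg_table *demo_map_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer);
static void demo_unmap_dma(struct ion_heap *heap, struct ion_buffer *buffer);

static struct ion_heap_ops demo_heap_ops = {
	.allocate	= demo_allocate,	/* fill buffer->priv_virt; 0 or -errno */
	.free		= demo_free,		/* must honor ION_PRIV_FLAG_SHRINKER_FREE */
	.map_dma	= demo_map_dma,		/* return the buffer's sg_table */
	.unmap_dma	= demo_unmap_dma,
	.map_kernel	= ion_heap_map_kernel,	/* generic helpers declared below */
	.unmap_kernel	= ion_heap_unmap_kernel,
	.map_user	= ion_heap_map_user,
};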
+ +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl: arch specific ioctl function if applicable + * + * returns a valid device on success or an ERR_PTR() value on failure + */ +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)); + +/** + * ion_device_destroy - frees a device and its resources + * @dev: the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev: the device + * @heap: the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * some helpers for common operations on buffers using the sg_table + * and vaddr fields + */ +void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *); +void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *); +int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, + struct vm_area_struct *); +int ion_heap_buffer_zero(struct ion_buffer *buffer); +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); + +/** + * ion_heap_init_shrinker - initialize a shrinker for a heap + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op + * this function will be called to set up a shrinker to shrink the freelists + * and call the heap's shrink op. + */ +void ion_heap_init_shrinker(struct ion_heap *heap); + +/** + * ion_heap_init_deferred_free - initialize deferred free functionality + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will + * be called to set up deferred frees. Calls to free the buffer will + * return immediately and the actual free will occur some time later. + */ +int ion_heap_init_deferred_free(struct ion_heap *heap); + +/** + * ion_heap_freelist_add - add a buffer to the deferred free list + * @heap: the heap + * @buffer: the buffer + * + * Adds an item to the deferred freelist. + */ +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); + +/** + * ion_heap_freelist_drain - drain the deferred free list + * @heap: the heap + * @size: amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed. The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + */ +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); + +/** + * ion_heap_freelist_shrink - drain the deferred free + * list, skipping any heap-specific + * pooling or caching mechanisms + * + * @heap: the heap + * @size: amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed. The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + * + * Unlike with @ion_heap_freelist_drain, don't put any pages back into + * page pools or otherwise cache the pages. Everything must be + * genuinely free'd back to the system. If you're free'ing from a + * shrinker you probably want to use this. Note that this relies on + * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE + * flag. + */ +size_t ion_heap_freelist_shrink(struct ion_heap *heap, + size_t size);
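[Editorial aside, not part of the patch] The deferred-free and shrinker hooks documented above combine roughly as follows. This header does not show which side performs the wiring, so treat the sequence as an illustrative sketch with hypothetical names:

static int demo_heap_register(struct ion_device *dev, struct ion_heap *heap)
{
	int ret;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
		/* frees now queue to heap->free_list and return at once;
		 * a kernel thread performs the actual free later */
		ret = ion_heap_init_deferred_free(heap);
		if (ret)
			return ret;
	}

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		/* registers heap->shrinker so memory pressure drains the
		 * freelist (and page pools, via ops->shrink) */
		ion_heap_init_shrinker(heap);

	ion_device_add_heap(dev, heap);
	return 0;
}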
+ +/** + * ion_heap_freelist_size - returns the size of the freelist in bytes + * @heap: the heap + */ +size_t ion_heap_freelist_size(struct ion_heap *heap); + + +/** + * functions for creating and destroying the built-in ion heaps. + * architectures can add their own custom architecture specific + * heaps as appropriate. + */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *); +void ion_heap_destroy(struct ion_heap *); +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); +void ion_system_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); +void ion_system_contig_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); +void ion_carveout_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *); +void ion_chunk_heap_destroy(struct ion_heap *); +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); +void ion_cma_heap_destroy(struct ion_heap *); + +/** + * kernel api to allocate/free from carveout -- used when carveout is + * used to back an architecture specific custom heap + */ +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, + unsigned long align); +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size); +/** + * The carveout heap returns physical addresses; since 0 may be a valid + * physical address, this is used to indicate that allocation failed. + */ +#define ION_CARVEOUT_ALLOCATE_FAIL -1 + +/** + * functions for creating and destroying a heap pool -- allows you + * to keep a pool of pre-allocated memory to use from your heap. Keeping + * a pool of memory that is ready for dma, i.e. any cached mappings have been + * invalidated from the cache, provides a significant performance benefit on + * many systems. */ + +/** + * struct ion_page_pool - pagepool struct + * @high_count: number of highmem items in the pool + * @low_count: number of lowmem items in the pool + * @high_items: list of highmem items + * @low_items: list of lowmem items + * @mutex: lock protecting this struct, especially the counts + * and item lists + * @gfp_mask: gfp_mask to use for allocations + * @order: order of pages in the pool + * @list: plist node for list of pools + * + * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings have + * been invalidated from the cache, provides a significant performance benefit + * on many systems + */ +struct ion_page_pool { + int high_count; + int low_count; + struct list_head high_items; + struct list_head low_items; + struct mutex mutex; + gfp_t gfp_mask; + unsigned int order; + struct plist_node list; +}; + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); +void ion_page_pool_destroy(struct ion_page_pool *); +void *ion_page_pool_alloc(struct ion_page_pool *); +void ion_page_pool_free(struct ion_page_pool *, struct page *); + +/** + * ion_page_pool_shrink - shrinks the size of the memory cached in the pool + * @pool: the pool + * @gfp_mask: the memory type to reclaim + * @nr_to_scan: number of items to shrink in pages + * + * returns the number of items freed in pages + */ +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan); + +/** + * ion_pages_sync_for_device - cache flush pages for use with the specified + * device + * @dev: the device the pages will be used with + * @page: the first page to be flushed + * @size: size in bytes of region to be flushed + * @dir: direction of dma transfer + */ +void ion_pages_sync_for_device(struct device *dev, struct page *page, + size_t size, enum dma_data_direction dir); + +#endif /* _ION_PRIV_H */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c new file mode 100644 index 000000000000..5945445ee5f3 --- /dev/null +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -0,0 +1,451 @@ +/* + * drivers/gpu/ion/ion_system_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#include <asm/page.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | + __GFP_NORETRY) & ~__GFP_WAIT; +static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); +static const unsigned int orders[] = {8, 4, 0}; +static const int num_orders = ARRAY_SIZE(orders); +static int order_to_index(unsigned int order) +{ + int i; + + for (i = 0; i < num_orders; i++) + if (order == orders[i]) + return i; + BUG(); + return -1; +} + +static unsigned int order_to_size(int order) +{ + return PAGE_SIZE << order; +} + +struct ion_system_heap { + struct ion_heap heap; + struct ion_page_pool **pools; +}; + +struct page_info { + struct page *page; + unsigned int order; + struct list_head list; +}; + +static struct page *alloc_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long order) +{ + bool cached = ion_buffer_cached(buffer); + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + struct page *page; + + if (!cached) { + page = ion_page_pool_alloc(pool); + } else { + gfp_t gfp_flags = low_order_gfp_flags; + + if (order > 4) + gfp_flags = high_order_gfp_flags; + page = alloc_pages(gfp_flags, order); + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + } + if (!page) + return NULL; + + return page; +} + +static void free_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, struct page *page, + unsigned int order) +{ + bool cached = ion_buffer_cached(buffer); + + if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) { + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + + ion_page_pool_free(pool, page); + } else { + __free_pages(page, order); + } +} + + +static struct page_info *alloc_largest_available(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long size, + unsigned int max_order) +{ + struct page *page; + struct page_info *info; + int i; + + info = kmalloc(sizeof(struct page_info), GFP_KERNEL); + if (!info) + return NULL; + + for (i = 0; i < num_orders; i++) { + if (size < order_to_size(orders[i])) + continue; + if (max_order < orders[i]) + continue; + + page = alloc_buffer_page(heap, buffer, orders[i]); + if (!page) + continue; + + info->page = page; + info->order = orders[i]; + INIT_LIST_HEAD(&info->list); + return info; + } + kfree(info); + + return NULL; +} + +static int ion_system_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + struct sg_table *table; + struct scatterlist *sg; + int ret; + struct list_head pages; + struct page_info *info, *tmp_info; + int i = 0; + unsigned long size_remaining = PAGE_ALIGN(size); + unsigned int max_order = orders[0]; + + if (align > PAGE_SIZE) + return -EINVAL; + + if (size / PAGE_SIZE > totalram_pages / 2) + return -ENOMEM; + + INIT_LIST_HEAD(&pages); + while (size_remaining > 0) { + info = alloc_largest_available(sys_heap, buffer, size_remaining, + max_order); + if (!info) + goto err; + list_add_tail(&info->list, &pages); + size_remaining -= (1 << info->order) * PAGE_SIZE; + max_order = 
info->order; + i++; + } + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + goto err; + + ret = sg_alloc_table(table, i, GFP_KERNEL); + if (ret) + goto err1; + + sg = table->sgl; + list_for_each_entry_safe(info, tmp_info, &pages, list) { + struct page *page = info->page; + sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0); + sg = sg_next(sg); + list_del(&info->list); + kfree(info); + } + + buffer->priv_virt = table; + return 0; +err1: + kfree(table); +err: + list_for_each_entry_safe(info, tmp_info, &pages, list) { + free_buffer_page(sys_heap, buffer, info->page, info->order); + kfree(info); + } + return -ENOMEM; +} + +static void ion_system_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + struct sg_table *table = buffer->sg_table; + bool cached = ion_buffer_cached(buffer); + struct scatterlist *sg; + LIST_HEAD(pages); + int i; + + /* uncached pages come from the page pools, zero them before returning + * for security purposes (other allocations are zeroed at alloc time) */ + if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) + ion_heap_buffer_zero(buffer); + + for_each_sg(table->sgl, sg, table->nents, i) + free_buffer_page(sys_heap, buffer, sg_page(sg), + get_order(sg->length)); + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_system_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, + int nr_to_scan) +{ + struct ion_system_heap *sys_heap; + int nr_total = 0; + int i; + + sys_heap = container_of(heap, struct ion_system_heap, heap); + + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + + nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); + } + + return nr_total; +} + +static struct ion_heap_ops system_heap_ops = { + .allocate = ion_system_heap_allocate, + .free = ion_system_heap_free, + .map_dma = ion_system_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_heap_map_user, + .shrink = ion_system_heap_shrink, +}; + +static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, + void *unused) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; + + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + + seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", + pool->high_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->high_count); + seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", + pool->low_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->low_count); + } + return 0; +} + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ + struct ion_system_heap *heap; + int i; + + heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->heap.ops = &system_heap_ops; + heap->heap.type = ION_HEAP_TYPE_SYSTEM; + heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders, + GFP_KERNEL); + if (!heap->pools) + goto err_alloc_pools; + for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool; + gfp_t gfp_flags = low_order_gfp_flags; + + if (orders[i] > 4) + gfp_flags = high_order_gfp_flags; + pool = ion_page_pool_create(gfp_flags, orders[i]); + if (!pool) + goto err_create_pool; + heap->pools[i] = pool; + } + + heap->heap.debug_show = ion_system_heap_debug_show; + return &heap->heap; +err_create_pool: + for (i = 0; i < num_orders; i++) + if (heap->pools[i]) + ion_page_pool_destroy(heap->pools[i]); + kfree(heap->pools); +err_alloc_pools: + kfree(heap); + return ERR_PTR(-ENOMEM); +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; + + for (i = 0; i < num_orders; i++) + ion_page_pool_destroy(sys_heap->pools[i]); + kfree(sys_heap->pools); + kfree(sys_heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + int order = get_order(len); + struct page *page; + struct sg_table *table; + unsigned long i; + int ret; + + if (align > (PAGE_SIZE << order)) + return -EINVAL; + + page = alloc_pages(low_order_gfp_flags, order); + if (!page) + return -ENOMEM; + + split_page(page, order); + + len = PAGE_ALIGN(len); + for (i = len >> PAGE_SHIFT; i < (1 << order); i++) + __free_page(page + i); + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) { + ret = -ENOMEM; + goto out; + } + + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto out; + + sg_set_page(table->sgl, page, len, 0); + + buffer->priv_virt = table; + + ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); + + return 0; + +out: + for (i = 0; i < len >> PAGE_SHIFT; i++) + __free_page(page + i); + kfree(table); + return ret; +} + +static void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; + unsigned long i; + + for (i = 0; i < pages; i++) + __free_page(page + i); + sg_free_table(table); + kfree(table); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + *addr = page_to_phys(page); + *len = buffer->size; + return 0; +} + +static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops kmalloc_ops = { + .allocate = ion_system_contig_heap_allocate, + .free = ion_system_contig_heap_free, + .phys = ion_system_contig_heap_phys, + .map_dma = ion_system_contig_heap_map_dma, + .unmap_dma = ion_system_contig_heap_unmap_dma, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_heap_map_user, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &kmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; + return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c new 
file mode 100644 index 000000000000..654acb5c8eba --- /dev/null +++ b/drivers/staging/android/ion/ion_test.c @@ -0,0 +1,282 @@ +/* + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "ion-test: " fmt + +#include <linux/dma-buf.h> +#include <linux/dma-direction.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include "ion.h" +#include "../uapi/ion_test.h" + +#define u64_to_uptr(x) ((void __user *)(unsigned long)(x)) + +struct ion_test_device { + struct miscdevice misc; +}; + +struct ion_test_data { + struct dma_buf *dma_buf; + struct device *dev; +}; + +static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf, + void __user *ptr, size_t offset, size_t size, bool write) +{ + int ret = 0; + struct dma_buf_attachment *attach; + struct sg_table *table; + pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); + enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + struct sg_page_iter sg_iter; + unsigned long offset_page; + + attach = dma_buf_attach(dma_buf, dev); + if (IS_ERR(attach)) + return PTR_ERR(attach); + + table = dma_buf_map_attachment(attach, dir); + if (IS_ERR(table)) + return PTR_ERR(table); + + offset_page = offset >> PAGE_SHIFT; + offset %= PAGE_SIZE; + + for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) { + struct page *page = sg_page_iter_page(&sg_iter); + void *vaddr = vmap(&page, 1, VM_MAP, pgprot); + size_t to_copy = PAGE_SIZE - offset; + + to_copy = min(to_copy, size); + if (!vaddr) { + ret = -ENOMEM; + goto err; + } + + if (write) + ret = copy_from_user(vaddr + offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + offset, to_copy); + + vunmap(vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + size -= to_copy; + if (!size) + break; + ptr += to_copy; + offset = 0; + } + +err: + dma_buf_unmap_attachment(attach, table, dir); + dma_buf_detach(dma_buf, attach); + return ret; +} + +static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, + size_t offset, size_t size, bool write) +{ + int ret; + unsigned long page_offset = offset >> PAGE_SHIFT; + size_t copy_offset = offset % PAGE_SIZE; + size_t copy_size = size; + enum dma_data_direction dir = write ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (offset > dma_buf->size || size > dma_buf->size - offset) + return -EINVAL; + + ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); + if (ret) + return ret; + + while (copy_size > 0) { + size_t to_copy; + void *vaddr = dma_buf_kmap(dma_buf, page_offset); + + if (!vaddr) + goto err; + + to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size); + + if (write) + ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); + + dma_buf_kunmap(dma_buf, page_offset, vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + + copy_size -= to_copy; + ptr += to_copy; + page_offset++; + copy_offset = 0; + } +err: + dma_buf_end_cpu_access(dma_buf, offset, size, dir); + return ret; +} + +static long ion_test_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ion_test_data *test_data = filp->private_data; + int ret = 0; + + union { + struct ion_test_rw_data test_rw; + } data; + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_TEST_SET_FD: + { + struct dma_buf *dma_buf = NULL; + int fd = arg; + + if (fd >= 0) { + dma_buf = dma_buf_get((int)arg); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + } + if (test_data->dma_buf) + dma_buf_put(test_data->dma_buf); + test_data->dma_buf = dma_buf; + break; + } + case ION_IOC_TEST_DMA_MAPPING: + { + ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + case ION_IOC_TEST_KERNEL_MAPPING: + { + ret = ion_handle_test_kernel(test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + default: + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + } + return ret; +} + +static int ion_test_open(struct inode *inode, struct file *file) +{ + struct ion_test_data *data; + struct miscdevice *miscdev = file->private_data; + + data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = miscdev->parent; + + file->private_data = data; + + return 0; +} + +static int ion_test_release(struct inode *inode, struct file *file) +{ + struct ion_test_data *data = file->private_data; + + kfree(data); + + return 0; +} + +static const struct file_operations ion_test_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ion_test_ioctl, + .compat_ioctl = ion_test_ioctl, + .open = ion_test_open, + .release = ion_test_release, +}; + +static int __init ion_test_probe(struct platform_device *pdev) +{ + int ret; + struct ion_test_device *testdev; + + testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device), + GFP_KERNEL); + if (!testdev) + return -ENOMEM; + + testdev->misc.minor = MISC_DYNAMIC_MINOR; + testdev->misc.name = "ion-test"; + testdev->misc.fops = &ion_test_fops; + testdev->misc.parent = &pdev->dev; + ret = misc_register(&testdev->misc); + if (ret) { + pr_err("failed to register misc device.\n"); + return ret; + } + + platform_set_drvdata(pdev, testdev); + + return 0; +} + +static struct platform_driver ion_test_platform_driver = { + .driver = { + .name = "ion-test", + }, +}; + +static int __init ion_test_init(void) +{ + platform_device_register_simple("ion-test", -1, NULL, 
0); + return platform_driver_probe(&ion_test_platform_driver, ion_test_probe); +} + +static void __exit ion_test_exit(void) +{ + platform_driver_unregister(&ion_test_platform_driver); +} + +module_init(ion_test_init); +module_exit(ion_test_exit); diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile new file mode 100644 index 000000000000..11cd003fb08f --- /dev/null +++ b/drivers/staging/android/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c new file mode 100644 index 000000000000..3474c65f87fa --- /dev/null +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -0,0 +1,84 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include "../ion.h" +#include "../ion_priv.h" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +static int tegra_ion_probe(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + num_heaps = pdata->nr; + + heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); + + idev = ion_device_create(NULL); + if (IS_ERR_OR_NULL(idev)) { + kfree(heaps); + return PTR_ERR(idev); + } + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + platform_set_drvdata(pdev, idev); + return 0; +err: + for (i = 0; i < num_heaps; i++) { + if (heaps[i]) + ion_heap_destroy(heaps[i]); + } + kfree(heaps); + return err; +} + +static int tegra_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + ion_device_destroy(idev); + for (i = 0; i < num_heaps; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + return 0; +} + +static struct platform_driver ion_driver = { + .probe = tegra_ion_probe, + .remove = tegra_ion_remove, + .driver = { .name = "ion-tegra" } +}; + +module_platform_driver(ion_driver); + diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c deleted file mode 100644 index 34519ea14b54..000000000000 --- a/drivers/staging/android/logger.c +++ /dev/null @@ -1,851 +0,0 @@ -/* - * drivers/misc/logger.c - * - * A Logging Subsystem - * - * Copyright (C) 2007-2008 Google, Inc. - * - * Robert Love <rlove@google.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. 
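[Editorial aside, not part of the patch] A sketch of how userspace might drive the ion-test ioctls defined above. The include path and the origin of the dma-buf fd are assumptions, and error handling is abbreviated:

#include <string.h>
#include <sys/ioctl.h>
#include "ion_test.h"		/* the uapi header this driver references */

static int demo_read_back(int test_fd, int dmabuf_fd, void *buf, size_t size)
{
	struct ion_test_rw_data rw;

	/* attach the dma-buf under test to the /dev/ion-test instance */
	if (ioctl(test_fd, ION_IOC_TEST_SET_FD, dmabuf_fd) < 0)
		return -1;

	memset(&rw, 0, sizeof(rw));
	rw.ptr = (unsigned long)buf;	/* user pointer, passed as a u64 */
	rw.offset = 0;
	rw.size = size;
	rw.write = 0;			/* 0: copy buffer contents out to us */

	/* exercise the kernel (kmap) path; ION_IOC_TEST_DMA_MAPPING
	 * exercises the dma mapping path instead */
	return ioctl(test_fd, ION_IOC_TEST_KERNEL_MAPPING, &rw);
}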
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#define pr_fmt(fmt) "logger: " fmt - -#include <linux/sched.h> -#include <linux/module.h> -#include <linux/fs.h> -#include <linux/miscdevice.h> -#include <linux/uaccess.h> -#include <linux/poll.h> -#include <linux/slab.h> -#include <linux/time.h> -#include <linux/vmalloc.h> -#include <linux/aio.h> -#include "logger.h" - -#include <asm/ioctls.h> - -/** - * struct logger_log - represents a specific log, such as 'main' or 'radio' - * @buffer: The actual ring buffer - * @misc: The "misc" device representing the log - * @wq: The wait queue for @readers - * @readers: This log's readers - * @mutex: The mutex that protects the @buffer - * @w_off: The current write head offset - * @head: The head, or location that readers start reading at. - * @size: The size of the log - * @logs: The list of log channels - * - * This structure lives from module insertion until module removal, so it does - * not need additional reference counting. The structure is protected by the - * mutex 'mutex'. - */ -struct logger_log { - unsigned char *buffer; - struct miscdevice misc; - wait_queue_head_t wq; - struct list_head readers; - struct mutex mutex; - size_t w_off; - size_t head; - size_t size; - struct list_head logs; -}; - -static LIST_HEAD(log_list); - - -/** - * struct logger_reader - a logging device open for reading - * @log: The associated log - * @list: The associated entry in @logger_log's list - * @r_off: The current read head offset. - * @r_all: Reader can read all entries - * @r_ver: Reader ABI version - * - * This object lives from open to release, so we don't need additional - * reference counting. The structure is protected by log->mutex. - */ -struct logger_reader { - struct logger_log *log; - struct list_head list; - size_t r_off; - bool r_all; - int r_ver; -}; - -/* logger_offset - returns index 'n' into the log via (optimized) modulus */ -static size_t logger_offset(struct logger_log *log, size_t n) -{ - return n & (log->size - 1); -} - - -/* - * file_get_log - Given a file structure, return the associated log - * - * This isn't aesthetic. We have several goals: - * - * 1) Need to quickly obtain the associated log during an I/O operation - * 2) Readers need to maintain state (logger_reader) - * 3) Writers need to be very fast (open() should be a near no-op) - * - * In the reader case, we can trivially go file->logger_reader->logger_log. - * For a writer, we don't want to maintain a logger_reader, so we just go - * file->logger_log. Thus what file->private_data points at depends on whether - * or not the file was opened for reading. This function hides that dirtiness. - */ -static inline struct logger_log *file_get_log(struct file *file) -{ - if (file->f_mode & FMODE_READ) { - struct logger_reader *reader = file->private_data; - return reader->log; - } else - return file->private_data; -} - -/* - * get_entry_header - returns a pointer to the logger_entry header within - * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must - * be provided. Typically the return value will be a pointer within - * 'logger->buf'. However, a pointer to 'scratch' may be returned if - * the log entry spans the end and beginning of the circular buffer. 
- */ -static struct logger_entry *get_entry_header(struct logger_log *log, - size_t off, struct logger_entry *scratch) -{ - size_t len = min(sizeof(struct logger_entry), log->size - off); - if (len != sizeof(struct logger_entry)) { - memcpy(((void *) scratch), log->buffer + off, len); - memcpy(((void *) scratch) + len, log->buffer, - sizeof(struct logger_entry) - len); - return scratch; - } - - return (struct logger_entry *) (log->buffer + off); -} - -/* - * get_entry_msg_len - Grabs the length of the message of the entry - * starting from from 'off'. - * - * An entry length is 2 bytes (16 bits) in host endian order. - * In the log, the length does not include the size of the log entry structure. - * This function returns the size including the log entry structure. - * - * Caller needs to hold log->mutex. - */ -static __u32 get_entry_msg_len(struct logger_log *log, size_t off) -{ - struct logger_entry scratch; - struct logger_entry *entry; - - entry = get_entry_header(log, off, &scratch); - return entry->len; -} - -static size_t get_user_hdr_len(int ver) -{ - if (ver < 2) - return sizeof(struct user_logger_entry_compat); - else - return sizeof(struct logger_entry); -} - -static ssize_t copy_header_to_user(int ver, struct logger_entry *entry, - char __user *buf) -{ - void *hdr; - size_t hdr_len; - struct user_logger_entry_compat v1; - - if (ver < 2) { - v1.len = entry->len; - v1.__pad = 0; - v1.pid = entry->pid; - v1.tid = entry->tid; - v1.sec = entry->sec; - v1.nsec = entry->nsec; - hdr = &v1; - hdr_len = sizeof(struct user_logger_entry_compat); - } else { - hdr = entry; - hdr_len = sizeof(struct logger_entry); - } - - return copy_to_user(buf, hdr, hdr_len); -} - -/* - * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the - * user-space buffer 'buf'. Returns 'count' on success. - * - * Caller must hold log->mutex. - */ -static ssize_t do_read_log_to_user(struct logger_log *log, - struct logger_reader *reader, - char __user *buf, - size_t count) -{ - struct logger_entry scratch; - struct logger_entry *entry; - size_t len; - size_t msg_start; - - /* - * First, copy the header to userspace, using the version of - * the header requested - */ - entry = get_entry_header(log, reader->r_off, &scratch); - if (copy_header_to_user(reader->r_ver, entry, buf)) - return -EFAULT; - - count -= get_user_hdr_len(reader->r_ver); - buf += get_user_hdr_len(reader->r_ver); - msg_start = logger_offset(log, - reader->r_off + sizeof(struct logger_entry)); - - /* - * We read from the msg in two disjoint operations. First, we read from - * the current msg head offset up to 'count' bytes or to the end of - * the log, whichever comes first. - */ - len = min(count, log->size - msg_start); - if (copy_to_user(buf, log->buffer + msg_start, len)) - return -EFAULT; - - /* - * Second, we read any remaining bytes, starting back at the head of - * the log. 
- */ - if (count != len) - if (copy_to_user(buf + len, log->buffer, count - len)) - return -EFAULT; - - reader->r_off = logger_offset(log, reader->r_off + - sizeof(struct logger_entry) + count); - - return count + get_user_hdr_len(reader->r_ver); -} - -/* - * get_next_entry_by_uid - Starting at 'off', returns an offset into - * 'log->buffer' which contains the first entry readable by 'euid' - */ -static size_t get_next_entry_by_uid(struct logger_log *log, - size_t off, kuid_t euid) -{ - while (off != log->w_off) { - struct logger_entry *entry; - struct logger_entry scratch; - size_t next_len; - - entry = get_entry_header(log, off, &scratch); - - if (uid_eq(entry->euid, euid)) - return off; - - next_len = sizeof(struct logger_entry) + entry->len; - off = logger_offset(log, off + next_len); - } - - return off; -} - -/* - * logger_read - our log's read() method - * - * Behavior: - * - * - O_NONBLOCK works - * - If there are no log entries to read, blocks until log is written to - * - Atomically reads exactly one log entry - * - * Will set errno to EINVAL if read - * buffer is insufficient to hold next entry. - */ -static ssize_t logger_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) -{ - struct logger_reader *reader = file->private_data; - struct logger_log *log = reader->log; - ssize_t ret; - DEFINE_WAIT(wait); - -start: - while (1) { - mutex_lock(&log->mutex); - - prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE); - - ret = (log->w_off == reader->r_off); - mutex_unlock(&log->mutex); - if (!ret) - break; - - if (file->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - break; - } - - if (signal_pending(current)) { - ret = -EINTR; - break; - } - - schedule(); - } - - finish_wait(&log->wq, &wait); - if (ret) - return ret; - - mutex_lock(&log->mutex); - - if (!reader->r_all) - reader->r_off = get_next_entry_by_uid(log, - reader->r_off, current_euid()); - - /* is there still something to read or did we race? */ - if (unlikely(log->w_off == reader->r_off)) { - mutex_unlock(&log->mutex); - goto start; - } - - /* get the size of the next entry */ - ret = get_user_hdr_len(reader->r_ver) + - get_entry_msg_len(log, reader->r_off); - if (count < ret) { - ret = -EINVAL; - goto out; - } - - /* get exactly one entry from the log */ - ret = do_read_log_to_user(log, reader, buf, ret); - -out: - mutex_unlock(&log->mutex); - - return ret; -} - -/* - * get_next_entry - return the offset of the first valid entry at least 'len' - * bytes after 'off'. - * - * Caller must hold log->mutex. - */ -static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) -{ - size_t count = 0; - - do { - size_t nr = sizeof(struct logger_entry) + - get_entry_msg_len(log, off); - off = logger_offset(log, off + nr); - count += nr; - } while (count < len); - - return off; -} - -/* - * is_between - is a < c < b, accounting for wrapping of a, b, and c - * positions in the buffer - * - * That is, if a<b, check for c between a and b - * and if a>b, check for c outside (not between) a and b - * - * |------- a xxxxxxxx b --------| - * c^ - * - * |xxxxx b --------- a xxxxxxxxx| - * c^ - * or c^ - */ -static inline int is_between(size_t a, size_t b, size_t c) -{ - if (a < b) { - /* is c between a and b? */ - if (a < c && c <= b) - return 1; - } else { - /* is c outside of b through a? */ - if (c <= b || a < c) - return 1; - } - - return 0; -} - -/* - * fix_up_readers - walk the list of all readers and "fix up" any who were - * lapped by the writer; also do the same for the default "start head". 
- * We do this by "pulling forward" the readers and start head to the first - * entry after the new write head. - * - * The caller needs to hold log->mutex. - */ -static void fix_up_readers(struct logger_log *log, size_t len) -{ - size_t old = log->w_off; - size_t new = logger_offset(log, old + len); - struct logger_reader *reader; - - if (is_between(old, new, log->head)) - log->head = get_next_entry(log, log->head, len); - - list_for_each_entry(reader, &log->readers, list) - if (is_between(old, new, reader->r_off)) - reader->r_off = get_next_entry(log, reader->r_off, len); -} - -/* - * do_write_log - writes 'len' bytes from 'buf' to 'log' - * - * The caller needs to hold log->mutex. - */ -static void do_write_log(struct logger_log *log, const void *buf, size_t count) -{ - size_t len; - - len = min(count, log->size - log->w_off); - memcpy(log->buffer + log->w_off, buf, len); - - if (count != len) - memcpy(log->buffer, buf + len, count - len); - - log->w_off = logger_offset(log, log->w_off + count); - -} - -/* - * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to - * the log 'log' - * - * The caller needs to hold log->mutex. - * - * Returns 'count' on success, negative error code on failure. - */ -static ssize_t do_write_log_from_user(struct logger_log *log, - const void __user *buf, size_t count) -{ - size_t len; - - len = min(count, log->size - log->w_off); - if (len && copy_from_user(log->buffer + log->w_off, buf, len)) - return -EFAULT; - - if (count != len) - if (copy_from_user(log->buffer, buf + len, count - len)) - /* - * Note that by not updating w_off, this abandons the - * portion of the new entry that *was* successfully - * copied, just above. This is intentional to avoid - * message corruption from missing fragments. - */ - return -EFAULT; - - log->w_off = logger_offset(log, log->w_off + count); - - return count; -} - -/* - * logger_aio_write - our write method, implementing support for write(), - * writev(), and aio_write(). Writes are our fast path, and we try to optimize - * them above all else. - */ -static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t ppos) -{ - struct logger_log *log = file_get_log(iocb->ki_filp); - size_t orig; - struct logger_entry header; - struct timespec now; - ssize_t ret = 0; - - now = current_kernel_time(); - - header.pid = current->tgid; - header.tid = current->pid; - header.sec = now.tv_sec; - header.nsec = now.tv_nsec; - header.euid = current_euid(); - header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD); - header.hdr_size = sizeof(struct logger_entry); - - /* null writes succeed, return zero */ - if (unlikely(!header.len)) - return 0; - - mutex_lock(&log->mutex); - - orig = log->w_off; - - /* - * Fix up any readers, pulling them forward to the first readable - * entry after (what will be) the new write offset. We do this now - * because if we partially fail, we can end up with clobbered log - * entries that encroach on readable buffer. 
- */ - fix_up_readers(log, sizeof(struct logger_entry) + header.len); - - do_write_log(log, &header, sizeof(struct logger_entry)); - - while (nr_segs-- > 0) { - size_t len; - ssize_t nr; - - /* figure out how much of this vector we can keep */ - len = min_t(size_t, iov->iov_len, header.len - ret); - - /* write out this segment's payload */ - nr = do_write_log_from_user(log, iov->iov_base, len); - if (unlikely(nr < 0)) { - log->w_off = orig; - mutex_unlock(&log->mutex); - return nr; - } - - iov++; - ret += nr; - } - - mutex_unlock(&log->mutex); - - /* wake up any blocked readers */ - wake_up_interruptible(&log->wq); - - return ret; -} - -static struct logger_log *get_log_from_minor(int minor) -{ - struct logger_log *log; - - list_for_each_entry(log, &log_list, logs) - if (log->misc.minor == minor) - return log; - return NULL; -} - -/* - * logger_open - the log's open() file operation - * - * Note how near a no-op this is in the write-only case. Keep it that way! - */ -static int logger_open(struct inode *inode, struct file *file) -{ - struct logger_log *log; - int ret; - - ret = nonseekable_open(inode, file); - if (ret) - return ret; - - log = get_log_from_minor(MINOR(inode->i_rdev)); - if (!log) - return -ENODEV; - - if (file->f_mode & FMODE_READ) { - struct logger_reader *reader; - - reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL); - if (!reader) - return -ENOMEM; - - reader->log = log; - reader->r_ver = 1; - reader->r_all = in_egroup_p(inode->i_gid) || - capable(CAP_SYSLOG); - - INIT_LIST_HEAD(&reader->list); - - mutex_lock(&log->mutex); - reader->r_off = log->head; - list_add_tail(&reader->list, &log->readers); - mutex_unlock(&log->mutex); - - file->private_data = reader; - } else - file->private_data = log; - - return 0; -} - -/* - * logger_release - the log's release file operation - * - * Note this is a total no-op in the write-only case. Keep it that way! - */ -static int logger_release(struct inode *ignored, struct file *file) -{ - if (file->f_mode & FMODE_READ) { - struct logger_reader *reader = file->private_data; - struct logger_log *log = reader->log; - - mutex_lock(&log->mutex); - list_del(&reader->list); - mutex_unlock(&log->mutex); - - kfree(reader); - } - - return 0; -} - -/* - * logger_poll - the log's poll file operation, for poll/select/epoll - * - * Note we always return POLLOUT, because you can always write() to the log. - * Note also that, strictly speaking, a return value of POLLIN does not - * guarantee that the log is readable without blocking, as there is a small - * chance that the writer can lap the reader in the interim between poll() - * returning and the read() request. 
- */ -static unsigned int logger_poll(struct file *file, poll_table *wait) -{ - struct logger_reader *reader; - struct logger_log *log; - unsigned int ret = POLLOUT | POLLWRNORM; - - if (!(file->f_mode & FMODE_READ)) - return ret; - - reader = file->private_data; - log = reader->log; - - poll_wait(file, &log->wq, wait); - - mutex_lock(&log->mutex); - if (!reader->r_all) - reader->r_off = get_next_entry_by_uid(log, - reader->r_off, current_euid()); - - if (log->w_off != reader->r_off) - ret |= POLLIN | POLLRDNORM; - mutex_unlock(&log->mutex); - - return ret; -} - -static long logger_set_version(struct logger_reader *reader, void __user *arg) -{ - int version; - if (copy_from_user(&version, arg, sizeof(int))) - return -EFAULT; - - if ((version < 1) || (version > 2)) - return -EINVAL; - - reader->r_ver = version; - return 0; -} - -static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct logger_log *log = file_get_log(file); - struct logger_reader *reader; - long ret = -EINVAL; - void __user *argp = (void __user *) arg; - - mutex_lock(&log->mutex); - - switch (cmd) { - case LOGGER_GET_LOG_BUF_SIZE: - ret = log->size; - break; - case LOGGER_GET_LOG_LEN: - if (!(file->f_mode & FMODE_READ)) { - ret = -EBADF; - break; - } - reader = file->private_data; - if (log->w_off >= reader->r_off) - ret = log->w_off - reader->r_off; - else - ret = (log->size - reader->r_off) + log->w_off; - break; - case LOGGER_GET_NEXT_ENTRY_LEN: - if (!(file->f_mode & FMODE_READ)) { - ret = -EBADF; - break; - } - reader = file->private_data; - - if (!reader->r_all) - reader->r_off = get_next_entry_by_uid(log, - reader->r_off, current_euid()); - - if (log->w_off != reader->r_off) - ret = get_user_hdr_len(reader->r_ver) + - get_entry_msg_len(log, reader->r_off); - else - ret = 0; - break; - case LOGGER_FLUSH_LOG: - if (!(file->f_mode & FMODE_WRITE)) { - ret = -EBADF; - break; - } - if (!(in_egroup_p(file->f_dentry->d_inode->i_gid) || - capable(CAP_SYSLOG))) { - ret = -EPERM; - break; - } - list_for_each_entry(reader, &log->readers, list) - reader->r_off = log->w_off; - log->head = log->w_off; - ret = 0; - break; - case LOGGER_GET_VERSION: - if (!(file->f_mode & FMODE_READ)) { - ret = -EBADF; - break; - } - reader = file->private_data; - ret = reader->r_ver; - break; - case LOGGER_SET_VERSION: - if (!(file->f_mode & FMODE_READ)) { - ret = -EBADF; - break; - } - reader = file->private_data; - ret = logger_set_version(reader, argp); - break; - } - - mutex_unlock(&log->mutex); - - return ret; -} - -static const struct file_operations logger_fops = { - .owner = THIS_MODULE, - .read = logger_read, - .aio_write = logger_aio_write, - .poll = logger_poll, - .unlocked_ioctl = logger_ioctl, - .compat_ioctl = logger_ioctl, - .open = logger_open, - .release = logger_release, -}; - -/* - * Log size must must be a power of two, and greater than - * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)). 
- */ -static int __init create_log(char *log_name, int size) -{ - int ret = 0; - struct logger_log *log; - unsigned char *buffer; - - buffer = vmalloc(size); - if (buffer == NULL) - return -ENOMEM; - - log = kzalloc(sizeof(struct logger_log), GFP_KERNEL); - if (log == NULL) { - ret = -ENOMEM; - goto out_free_buffer; - } - log->buffer = buffer; - - log->misc.minor = MISC_DYNAMIC_MINOR; - log->misc.name = kstrdup(log_name, GFP_KERNEL); - if (log->misc.name == NULL) { - ret = -ENOMEM; - goto out_free_log; - } - - log->misc.fops = &logger_fops; - log->misc.parent = NULL; - - init_waitqueue_head(&log->wq); - INIT_LIST_HEAD(&log->readers); - mutex_init(&log->mutex); - log->w_off = 0; - log->head = 0; - log->size = size; - - INIT_LIST_HEAD(&log->logs); - list_add_tail(&log->logs, &log_list); - - /* finally, initialize the misc device for this log */ - ret = misc_register(&log->misc); - if (unlikely(ret)) { - pr_err("failed to register misc device for log '%s'!\n", - log->misc.name); - goto out_free_log; - } - - pr_info("created %luK log '%s'\n", - (unsigned long) log->size >> 10, log->misc.name); - - return 0; - -out_free_log: - kfree(log); - -out_free_buffer: - vfree(buffer); - return ret; -} - -static int __init logger_init(void) -{ - int ret; - - ret = create_log(LOGGER_LOG_MAIN, 256*1024); - if (unlikely(ret)) - goto out; - - ret = create_log(LOGGER_LOG_EVENTS, 256*1024); - if (unlikely(ret)) - goto out; - - ret = create_log(LOGGER_LOG_RADIO, 256*1024); - if (unlikely(ret)) - goto out; - - ret = create_log(LOGGER_LOG_SYSTEM, 256*1024); - if (unlikely(ret)) - goto out; - -out: - return ret; -} - -static void __exit logger_exit(void) -{ - struct logger_log *current_log, *next_log; - - list_for_each_entry_safe(current_log, next_log, &log_list, logs) { - /* we have to delete all the entry inside log_list */ - misc_deregister(&current_log->misc); - vfree(current_log->buffer); - kfree(current_log->misc.name); - list_del(&current_log->logs); - kfree(current_log); - } -} - - -device_initcall(logger_init); -module_exit(logger_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Robert Love, <rlove@google.com>"); -MODULE_DESCRIPTION("Android Logger"); diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h deleted file mode 100644 index 70af7d805dff..000000000000 --- a/drivers/staging/android/logger.h +++ /dev/null @@ -1,89 +0,0 @@ -/* include/linux/logger.h - * - * Copyright (C) 2007-2008 Google, Inc. - * Author: Robert Love <rlove@android.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details.
- * - */ - -#ifndef _LINUX_LOGGER_H -#define _LINUX_LOGGER_H - -#include <linux/types.h> -#include <linux/ioctl.h> - -/** - * struct user_logger_entry_compat - defines a single entry that is given to a logger - * @len: The length of the payload - * @__pad: Two bytes of padding that appear to be required - * @pid: The generating process' process ID - * @tid: The generating process' thread ID - * @sec: The number of seconds that have elapsed since the Epoch - * @nsec: The number of nanoseconds that have elapsed since @sec - * @msg: The message that is to be logged - * - * The userspace structure for version 1 of the logger_entry ABI. - * This structure is returned to userspace unless the caller requests - * an upgrade to a newer ABI version. - */ -struct user_logger_entry_compat { - __u16 len; - __u16 __pad; - __s32 pid; - __s32 tid; - __s32 sec; - __s32 nsec; - char msg[0]; -}; - -/** - * struct logger_entry - defines a single entry that is given to a logger - * @len: The length of the payload - * @hdr_size: sizeof(struct logger_entry_v2) - * @pid: The generating process' process ID - * @tid: The generating process' thread ID - * @sec: The number of seconds that have elapsed since the Epoch - * @nsec: The number of nanoseconds that have elapsed since @sec - * @euid: Effective UID of logger - * @msg: The message that is to be logged - * - * The structure for version 2 of the logger_entry ABI. - * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION) - * is called with version >= 2 - */ -struct logger_entry { - __u16 len; - __u16 hdr_size; - __s32 pid; - __s32 tid; - __s32 sec; - __s32 nsec; - kuid_t euid; - char msg[0]; -}; - -#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ -#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ -#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */ -#define LOGGER_LOG_MAIN "log_main" /* everything else */ - -#define LOGGER_ENTRY_MAX_PAYLOAD 4076 - -#define __LOGGERIO 0xAE - -#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */ -#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ -#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ -#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ -#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5) /* abi version */ -#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6) /* abi version */ - -#endif /* _LINUX_LOGGER_H */ diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index fe74494868ef..a56e0894f668 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -39,7 +39,6 @@ #include <linux/sched.h> #include <linux/swap.h> #include <linux/rcupdate.h> -#include <linux/profile.h> #include <linux/notifier.h> static uint32_t lowmem_debug_level = 1; @@ -74,6 +73,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) int tasksize; int i; short min_score_adj = OOM_SCORE_ADJ_MAX + 1; + int minfree = 0; int selected_tasksize = 0; short selected_oom_score_adj; int array_size = ARRAY_SIZE(lowmem_adj); @@ -86,8 +86,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) if (lowmem_minfree_size < array_size) array_size = lowmem_minfree_size; for (i = 0; i < array_size; i++) { - if (other_free < lowmem_minfree[i] && - other_file < lowmem_minfree[i]) { + minfree = lowmem_minfree[i]; + if (other_free < minfree && other_file < minfree) { min_score_adj = lowmem_adj[i]; break; } @@ -144,13 +144,22 
@@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) selected = p; selected_tasksize = tasksize; selected_oom_score_adj = oom_score_adj; - lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n", - p->pid, p->comm, oom_score_adj, tasksize); + lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n", + p->comm, p->pid, oom_score_adj, tasksize); } if (selected) { - lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n", - selected->pid, selected->comm, - selected_oom_score_adj, selected_tasksize); + lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \ + " to free %ldkB on behalf of '%s' (%d) because\n" \ + " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \ + " Free memory is %ldkB above reserved\n", + selected->comm, selected->pid, + selected_oom_score_adj, + selected_tasksize * (long)(PAGE_SIZE / 1024), + current->comm, current->pid, + other_file * (long)(PAGE_SIZE / 1024), + minfree * (long)(PAGE_SIZE / 1024), + min_score_adj, + other_free * (long)(PAGE_SIZE / 1024)); lowmem_deathpending_timeout = jiffies + HZ; send_sig(SIGKILL, selected, 0); set_tsk_thread_flag(selected, TIF_MEMDIE); @@ -178,9 +187,94 @@ static void __exit lowmem_exit(void) unregister_shrinker(&lowmem_shrinker); } +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES +static short lowmem_oom_adj_to_oom_score_adj(short oom_adj) +{ + if (oom_adj == OOM_ADJUST_MAX) + return OOM_SCORE_ADJ_MAX; + else + return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; +} + +static void lowmem_autodetect_oom_adj_values(void) +{ + int i; + short oom_adj; + short oom_score_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + + if (array_size <= 0) + return; + + oom_adj = lowmem_adj[array_size - 1]; + if (oom_adj > OOM_ADJUST_MAX) + return; + + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + if (oom_score_adj <= OOM_ADJUST_MAX) + return; + + lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n"); + for (i = 0; i < array_size; i++) { + oom_adj = lowmem_adj[i]; + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + lowmem_adj[i] = oom_score_adj; + lowmem_print(1, "oom_adj %d => oom_score_adj %d\n", + oom_adj, oom_score_adj); + } +} + +static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp) +{ + int ret; + + ret = param_array_ops.set(val, kp); + + /* HACK: Autodetect oom_adj values in lowmem_adj array */ + lowmem_autodetect_oom_adj_values(); + + return ret; +} + +static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp) +{ + return param_array_ops.get(buffer, kp); +} + +static void lowmem_adj_array_free(void *arg) +{ + param_array_ops.free(arg); +} + +static struct kernel_param_ops lowmem_adj_array_ops = { + .set = lowmem_adj_array_set, + .get = lowmem_adj_array_get, + .free = lowmem_adj_array_free, +}; + +static const struct kparam_array __param_arr_adj = { + .max = ARRAY_SIZE(lowmem_adj), + .num = &lowmem_adj_size, + .ops = &param_ops_short, + .elemsize = sizeof(lowmem_adj[0]), + .elem = lowmem_adj, +}; +#endif + module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES +__module_param_call(MODULE_PARAM_PREFIX, adj, + &lowmem_adj_array_ops, + .arr = &__param_arr_adj, + S_IRUGO | S_IWUSR, -1); +__MODULE_PARM_TYPE(adj, "array of short"); +#else module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, S_IRUGO | S_IWUSR); +#endif
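The conversion registered above exists because Android userspace historically wrote legacy oom_adj values (range -17..15, where -17 is OOM_DISABLE) into /sys/module/lowmemorykiller/parameters/adj, while the kernel now ranks tasks by oom_score_adj (range -1000..1000). For reference, a standalone sketch of the same arithmetic; the constants mirror include/uapi/linux/oom.h, and the sample table in main() is illustrative rather than anything the driver mandates:

#include <stdio.h>

#define OOM_DISABLE		(-17)
#define OOM_ADJUST_MAX		15
#define OOM_SCORE_ADJ_MAX	1000

/* Same mapping as lowmem_oom_adj_to_oom_score_adj() above */
static short oom_adj_to_score_adj(short oom_adj)
{
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;
	return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

int main(void)
{
	short adj[] = { 0, 1, 6, 12 };	/* a typical lowmem_adj table */
	int i;

	for (i = 0; i < 4; i++)
		printf("oom_adj %d => oom_score_adj %d\n",
		       adj[i], oom_adj_to_score_adj(adj[i]));
	return 0;	/* prints 0, 58, 352, 705 */
}

Note that the integer division truncates toward zero, so oom_adj 1 maps to 58 rather than a rounded 59; the in-kernel helper behaves identically.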
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR); module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c index 4928f93bdf3d..820af5cfb830 100644 --- a/drivers/staging/android/sw_sync.c +++ b/drivers/staging/android/sw_sync.c @@ -97,6 +97,7 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size) { struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + snprintf(str, size, "%d", pt->value); } @@ -156,6 +157,7 @@ static int sw_sync_open(struct inode *inode, struct file *file) static int sw_sync_release(struct inode *inode, struct file *file) { struct sw_sync_timeline *obj = file->private_data; + sync_timeline_destroy(&obj->obj); return 0; } diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index 585040be5f18..1a50669ec8a9 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h @@ -18,10 +18,9 @@ #define _LINUX_SW_SYNC_H #include <linux/types.h> - -#ifdef __KERNEL__ - +#include <linux/kconfig.h> #include "sync.h" +#include "uapi/sw_sync.h" struct sw_sync_timeline { struct sync_timeline obj; @@ -35,24 +34,26 @@ struct sw_sync_pt { u32 value; }; +#if IS_ENABLED(CONFIG_SW_SYNC) struct sw_sync_timeline *sw_sync_timeline_create(const char *name); void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); - -#endif /* __KERNEL __ */ - -struct sw_sync_create_fence_data { - __u32 value; - char name[32]; - __s32 fence; /* fd of new fence */ -}; - -#define SW_SYNC_IOC_MAGIC 'W' - -#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ - struct sw_sync_create_fence_data) -#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) - +#else +static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + return NULL; +} + +static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ +} + +static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, + u32 value) +{ + return NULL; +} +#endif /* IS_ENABLED(CONFIG_SW_SYNC) */ #endif /* _LINUX_SW_SYNC_H */ diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 3893a3574769..61e624905916 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref) container_of(kref, struct sync_timeline, kref); unsigned long flags; - if (obj->ops->release_obj) - obj->ops->release_obj(obj); - spin_lock_irqsave(&sync_timeline_list_lock, flags); list_del(&obj->sync_timeline_list); spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + kfree(obj); } void sync_timeline_destroy(struct sync_timeline *obj) { obj->destroyed = true; + smp_wmb(); /* - * If this is not the last reference, signal any children - * that their parent is going away. + * signal any children that their parent is going away. 
*/ + sync_timeline_signal(obj); - if (!kref_put(&obj->kref, sync_timeline_free)) - sync_timeline_signal(obj); + kref_put(&obj->kref, sync_timeline_free); } EXPORT_SYMBOL(sync_timeline_destroy); @@ -384,6 +384,7 @@ static void sync_fence_detach_pts(struct sync_fence *fence) list_for_each_safe(pos, n, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_timeline_remove_pt(pt); } } @@ -394,6 +395,7 @@ static void sync_fence_free_pts(struct sync_fence *fence) list_for_each_safe(pos, n, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_pt_free(pt); } } @@ -827,6 +829,7 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct sync_fence *fence = file->private_data; + switch (cmd) { case SYNC_IOC_WAIT: return sync_fence_ioctl_wait(fence, arg); @@ -856,18 +859,21 @@ static const char *sync_status_str(int status) static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) { int status = pt->status; + seq_printf(s, " %s%spt %s", fence ? pt->parent->name : "", fence ? "_" : "", sync_status_str(status)); if (pt->status) { struct timeval tv = ktime_to_timeval(pt->timestamp); + seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); } if (pt->parent->ops->timeline_value_str && pt->parent->ops->pt_value_str) { char value[64]; + pt->parent->ops->pt_value_str(pt, value, sizeof(value)); seq_printf(s, ": %s", value); if (fence) { @@ -892,6 +898,7 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) if (obj->ops->timeline_value_str) { char value[64]; + obj->ops->timeline_value_str(obj, value, sizeof(value)); seq_printf(s, ": %s", value); } else if (obj->ops->print_obj) { @@ -1001,6 +1008,7 @@ void sync_dump(void) for (i = 0; i < s.count; i += DUMP_CHUNK) { if ((s.count - i) > DUMP_CHUNK) { char c = s.buf[i + DUMP_CHUNK]; + s.buf[i + DUMP_CHUNK] = 0; pr_cont("%s", s.buf + i); s.buf[i + DUMP_CHUNK] = c; diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index 38ea986dc70f..75da9e85ac69 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -14,14 +14,14 @@ #define _LINUX_SYNC_H #include <linux/types.h> -#ifdef __KERNEL__ - #include <linux/kref.h> #include <linux/ktime.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/wait.h> +#include "uapi/sync.h" + struct sync_timeline; struct sync_pt; struct sync_fence; @@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence, */ int sync_fence_wait(struct sync_fence *fence, long timeout); -#endif /* __KERNEL__ */ - -/** - * struct sync_merge_data - data passed to merge ioctl - * @fd2: file descriptor of second fence - * @name: name of new fence - * @fence: returns the fd of the new fence to userspace - */ -struct sync_merge_data { - __s32 fd2; /* fd of second fence */ - char name[32]; /* name of new fence */ - __s32 fence; /* fd on newly created fence */ -}; - -/** - * struct sync_pt_info - detailed sync_pt information - * @len: length of sync_pt_info including any driver_data - * @obj_name: name of parent sync_timeline - * @driver_name: name of driver implementing the parent - * @status: status of the sync_pt 0:active 1:signaled <0:error - * @timestamp_ns: timestamp of status change in nanoseconds - * @driver_data: any driver dependent data - */ -struct sync_pt_info { - __u32 len; - char obj_name[32]; - char driver_name[32]; - __s32 status; - __u64 timestamp_ns; - - __u8 driver_data[0]; -}; - -/** - * struct
sync_fence_info_data - data returned from fence info ioctl - * @len: ioctl caller writes the size of the buffer it's passing in. - * ioctl returns length of sync_fence_data returned to userspace - * including pt_info. - * @name: name of fence - * @status: status of fence. 1: signaled 0:active <0:error - * @pt_info: a sync_pt_info struct for every sync_pt in the fence - */ -struct sync_fence_info_data { - __u32 len; - char name[32]; - __s32 status; - - __u8 pt_info[0]; -}; - -#define SYNC_IOC_MAGIC '>' - -/** - * DOC: SYNC_IOC_WAIT - wait for a fence to signal - * - * pass timeout in milliseconds. Waits indefinitely if timeout < 0. - */ -#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32) - -/** - * DOC: SYNC_IOC_MERGE - merge two fences - * - * Takes a struct sync_merge_data. Creates a new fence containing copies of - * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the - * new fence's fd in sync_merge_data.fence - */ -#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) - -/** - * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence - * - * Takes a struct sync_fence_info_data with extra space allocated for pt_info. - * Caller should write the size of the buffer into len. On return, len is - * updated to reflect the total size of the sync_fence_info_data including - * pt_info. - * - * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. - * To iterate over the sync_pt_infos, use the sync_pt_info.len field. - */ -#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\ - struct sync_fence_info_data) - #endif /* _LINUX_SYNC_H */ diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c index e81451425c01..ae9966d1f7cc 100644 --- a/drivers/staging/android/timed_gpio.c +++ b/drivers/staging/android/timed_gpio.c @@ -51,6 +51,7 @@ static int gpio_get_time(struct timed_output_dev *dev) if (hrtimer_active(&data->timer)) { ktime_t r = hrtimer_get_remaining(&data->timer); struct timeval t = ktime_to_timeval(r); + return t.tv_sec * 1000 + t.tv_usec / 1000; } else return 0; diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h new file mode 100644 index 000000000000..13df42d200b7 --- /dev/null +++ b/drivers/staging/android/uapi/ashmem.h @@ -0,0 +1,48 @@ +/* + * drivers/staging/android/uapi/ashmem.h + * + * Copyright 2008 Google Inc. + * Author: Robert Love + * + * This file is dual licensed. It may be redistributed and/or modified + * under the terms of the Apache 2.0 License OR version 2 of the GNU + * General Public License. + */ + +#ifndef _UAPI_LINUX_ASHMEM_H +#define _UAPI_LINUX_ASHMEM_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define ASHMEM_NAME_LEN 256 + +#define ASHMEM_NAME_DEF "dev/ashmem" + +/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ +#define ASHMEM_NOT_PURGED 0 +#define ASHMEM_WAS_PURGED 1 + +/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned?
*/ +#define ASHMEM_IS_UNPINNED 0 +#define ASHMEM_IS_PINNED 1 + +struct ashmem_pin { + __u32 offset; /* offset into region, in bytes, page-aligned */ + __u32 len; /* length forward from offset, in bytes, page-aligned */ +}; + +#define __ASHMEMIOC 0x77 + +#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) +#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) +#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) +#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) +#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) +#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) +#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) +#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) +#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) +#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) + +#endif /* _UAPI_LINUX_ASHMEM_H */ diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h new file mode 100644 index 000000000000..f09e7c154d69 --- /dev/null +++ b/drivers/staging/android/uapi/ion.h @@ -0,0 +1,196 @@ +/* + * drivers/staging/android/uapi/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_H +#define _UAPI_LINUX_ION_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +typedef int ion_user_handle_t; + +/** + * enum ion_heap_types - list of all possible types of heaps + * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc + * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc + * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved + * carveout heap, allocations are physically + * contiguous + * @ION_HEAP_TYPE_DMA: memory allocated via DMA API + * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask + * is used to identify the heaps, so only 32 + * total heap types are supported + */ +enum ion_heap_type { + ION_HEAP_TYPE_SYSTEM, + ION_HEAP_TYPE_SYSTEM_CONTIG, + ION_HEAP_TYPE_CARVEOUT, + ION_HEAP_TYPE_CHUNK, + ION_HEAP_TYPE_DMA, + ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always + are at the end of this enum */ + ION_NUM_HEAPS = 16, +}; + +#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM) +#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG) +#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT) +#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA) + +#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8 + +/** + * allocation flags - the lower 16 bits are used by core ion, the upper 16 + * bits are reserved for use by the heaps themselves. 
+ */ +#define ION_FLAG_CACHED 1 /* mappings of this buffer should be + cached, ion will do cache + maintenance when the buffer is + mapped for dma */ +#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be created + at mmap time, if this is set + caches must be managed manually */ + +/** + * DOC: Ion Userspace API + * + * create a client by opening /dev/ion + * most operations handled via following ioctls + * + */ + +/** + * struct ion_allocation_data - metadata passed from userspace for allocations + * @len: size of the allocation + * @align: required alignment of the allocation + * @heap_id_mask: mask of heap ids to allocate from + * @flags: flags passed to heap + * @handle: pointer that will be populated with a cookie to use to + * refer to this allocation + * + * Provided by userspace as an argument to the ioctl + */ +struct ion_allocation_data { + size_t len; + size_t align; + unsigned int heap_id_mask; + unsigned int flags; + ion_user_handle_t handle; +}; + +/** + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair + * @handle: a handle + * @fd: a file descriptor representing that handle + * + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with + * the handle returned from ion alloc, and the kernel returns the file + * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace + * provides the file descriptor and the kernel returns the handle. + */ +struct ion_fd_data { + ion_user_handle_t handle; + int fd; +}; + +/** + * struct ion_handle_data - a handle passed to/from the kernel + * @handle: a handle + */ +struct ion_handle_data { + ion_user_handle_t handle; +}; + +/** + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl + * @cmd: the custom ioctl function to call + * @arg: additional data to pass to the custom ioctl, typically a user + * pointer to a predefined structure + * + * This works just like the regular cmd and arg fields of an ioctl. + */ +struct ion_custom_data { + unsigned int cmd; + unsigned long arg; +}; + +#define ION_IOC_MAGIC 'I' + +/** + * DOC: ION_IOC_ALLOC - allocate memory + * + * Takes an ion_allocation_data struct and returns it with the handle field + * populated with the opaque handle for the allocation. + */ +#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct ion_allocation_data) + +/** + * DOC: ION_IOC_FREE - free memory + * + * Takes an ion_handle_data struct and frees the handle. + */ +#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +/** + * DOC: ION_IOC_MAP - get a file descriptor to mmap + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be used as an argument to mmap. + */ +#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +/** + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be passed to another process. The corresponding opaque handle can + * be retrieved via ION_IOC_IMPORT.
+ */ +#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +/** + * DOC: ION_IOC_IMPORT - imports a shared file descriptor + * + * Takes an ion_fd_data struct with the fd field populated with a valid file + * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle + * field set to the corresponding opaque handle. + */ +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +/** + * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory + * + * Deprecated in favor of using the dma_buf APIs correctly (syncing + * will happen automatically when the buffer is mapped to a device). + * If necessary, it should be used after touching a cached buffer from the cpu, + * this will make the buffer in memory coherent. + */ +#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) + +/** + * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl + * + * Takes the argument of the architecture specific ioctl to call and + * passes appropriate userdata for that ioctl + */ +#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + +#endif /* _UAPI_LINUX_ION_H */ diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h new file mode 100644 index 000000000000..ffef06f63133 --- /dev/null +++ b/drivers/staging/android/uapi/ion_test.h @@ -0,0 +1,70 @@ +/* + * drivers/staging/android/uapi/ion_test.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_TEST_H +#define _UAPI_LINUX_ION_TEST_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct ion_test_rw_data - metadata passed to the kernel to read or write a handle + * @ptr: a pointer to an area at least as large as size + * @offset: offset into the ion buffer to start reading + * @size: size to read or write + * @write: 1 to write, 0 to read + */ +struct ion_test_rw_data { + __u64 ptr; + __u64 offset; + __u64 size; + int write; + int __padding; +}; + +#define ION_IOC_MAGIC 'I' + +/** + * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver + * + * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will + * release the first fd. + */ +#define ION_IOC_TEST_SET_FD \ + _IO(ION_IOC_MAGIC, 0xf0) + +/** + * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA + * + * Reads or writes the memory from a handle using an uncached mapping. Can be + * used by unit tests to emulate a DMA engine as closely as possible. Only + * expected to be used for debugging and testing, may not always be available. + */ +#define ION_IOC_TEST_DMA_MAPPING \ + _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data) + +/** + * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle + * + * Reads or writes the memory from a handle using a kernel mapping. Can be + * used by unit tests to test heap map_kernel functions. Only expected to be + * used for debugging and testing, may not always be available.
+ */ +#define ION_IOC_TEST_KERNEL_MAPPING \ + _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data) + + +#endif /* _UAPI_LINUX_ION_TEST_H */ diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h new file mode 100644 index 000000000000..9b5d4869505c --- /dev/null +++ b/drivers/staging/android/uapi/sw_sync.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_SW_SYNC_H +#define _UAPI_LINUX_SW_SYNC_H + +#include <linux/types.h> + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; /* fd of new fence */ +}; + +#define SW_SYNC_IOC_MAGIC 'W' + +#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ + struct sw_sync_create_fence_data) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + +#endif /* _UAPI_LINUX_SW_SYNC_H */ diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h new file mode 100644 index 000000000000..57fdaadc4b04 --- /dev/null +++ b/drivers/staging/android/uapi/sync.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_SYNC_H +#define _UAPI_LINUX_SYNC_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct sync_merge_data - data passed to merge ioctl + * @fd2: file descriptor of second fence + * @name: name of new fence + * @fence: returns the fd of the new fence to userspace + */ +struct sync_merge_data { + __s32 fd2; /* fd of second fence */ + char name[32]; /* name of new fence */ + __s32 fence; /* fd on newly created fence */ +}; + +/** + * struct sync_pt_info - detailed sync_pt information + * @len: length of sync_pt_info including any driver_data + * @obj_name: name of parent sync_timeline + * @driver_name: name of driver implementing the parent + * @status: status of the sync_pt 0:active 1:signaled <0:error + * @timestamp_ns: timestamp of status change in nanoseconds + * @driver_data: any driver dependent data + */ +struct sync_pt_info { + __u32 len; + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u64 timestamp_ns; + + __u8 driver_data[0]; +}; + +/** + * struct sync_fence_info_data - data returned from fence info ioctl + * @len: ioctl caller writes the size of the buffer it's passing in. + * ioctl returns length of sync_fence_data returned to userspace + * including pt_info. + * @name: name of fence + * @status: status of fence. 1: signaled 0:active <0:error + * @pt_info: a sync_pt_info struct for every sync_pt in the fence + */ +struct sync_fence_info_data { + __u32 len; + char name[32]; + __s32 status; + + __u8 pt_info[0]; +}; + +#define SYNC_IOC_MAGIC '>' + +/** + * DOC: SYNC_IOC_WAIT - wait for a fence to signal + * + * pass timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */ +#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32) + +/** + * DOC: SYNC_IOC_MERGE - merge two fences + * + * Takes a struct sync_merge_data. Creates a new fence containing copies of + * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the + * new fence's fd in sync_merge_data.fence + */ +#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) + +/** + * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence + * + * Takes a struct sync_fence_info_data with extra space allocated for pt_info. + * Caller should write the size of the buffer into len. On return, len is + * updated to reflect the total size of the sync_fence_info_data including + * pt_info. + * + * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. + * To iterate over the sync_pt_infos, use the sync_pt_info.len field. + */ +#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\ + struct sync_fence_info_data) + +#endif /* _UAPI_LINUX_SYNC_H */
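Taken together, the sw_sync and sync uapi headers above are enough to exercise the fence machinery from userspace. A minimal sketch, assuming CONFIG_SW_SYNC_USER is enabled, the two headers are on the include path, and the sw_sync misc device appears as /dev/sw_sync (the node path depends on how device nodes are managed on the target):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sw_sync.h"	/* SW_SYNC_IOC_* from the header above */
#include "sync.h"	/* SYNC_IOC_* from the header above */

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	__s32 timeout = 1000;	/* in ms; a negative value waits forever */
	int timeline;

	/* Each open of /dev/sw_sync is an independent timeline. */
	timeline = open("/dev/sw_sync", O_RDWR);
	if (timeline < 0)
		return 1;

	/* Create a fence that signals when the timeline reaches 1. */
	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "example_fence", sizeof(data.name) - 1);
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* Step the timeline forward by one, signalling the fence... */
	ioctl(timeline, SW_SYNC_IOC_INC, &inc);

	/* ...so this wait returns immediately instead of blocking. */
	printf("wait returned %d\n", ioctl(data.fence, SYNC_IOC_WAIT, &timeout));

	close(data.fence);
	close(timeline);
	return 0;
}

Dropping the SW_SYNC_IOC_INC call would leave the fence unsignalled, so the SYNC_IOC_WAIT ioctl would block for the full second and then fail with ETIME.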