diff options
Diffstat (limited to 'driver/product/kernel/drivers/gpu/drm/pl111')
19 files changed, 4247 insertions, 0 deletions
diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/Kbuild b/driver/product/kernel/drivers/gpu/drm/pl111/Kbuild new file mode 100755 index 0000000..f10d58c --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/Kbuild @@ -0,0 +1,28 @@ +# +# (C) COPYRIGHT ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + +pl111_drm-y += pl111_drm_device.o \ + pl111_drm_connector.o \ + pl111_drm_crtc.o \ + pl111_drm_cursor.o \ + pl111_drm_dma_buf.o \ + pl111_drm_encoder.o \ + pl111_drm_fb.o \ + pl111_drm_gem.o \ + pl111_drm_pl111.o \ + pl111_drm_platform.o \ + pl111_drm_suspend.o \ + pl111_drm_vma.o + +obj-$(CONFIG_DRM_PL111) += pl111_drm.o diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/Kconfig b/driver/product/kernel/drivers/gpu/drm/pl111/Kconfig new file mode 100755 index 0000000..60b465c --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/Kconfig @@ -0,0 +1,23 @@ +# +# (C) COPYRIGHT ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. 
+# +# + +config DRM_PL111 + tristate "DRM Support for PL111 CLCD Controller" + depends on DRM + select DRM_KMS_HELPER + select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE + help + Choose this option for DRM support for the PL111 CLCD controller. + If M is selected the module will be called pl111_drm. + diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/Makefile b/driver/product/kernel/drivers/gpu/drm/pl111/Makefile new file mode 100755 index 0000000..2869f58 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/Makefile @@ -0,0 +1,32 @@ +# +# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +# linux build system bootstrap for out-of-tree module + +# default to building for the host +ARCH ?= $(shell uname -m) + +ifeq ($(KDIR),) +$(error Must specify KDIR to point to the kernel to target)) +endif + +all: pl111_drm + +pl111_drm: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_DMA_SHARED_BUFFER_USES_KDS=y CONFIG_DRM_PL111=m + +clean: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean + diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_clcd_ext.h b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_clcd_ext.h new file mode 100755 index 0000000..d3e0086 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_clcd_ext.h @@ -0,0 +1,95 @@ +/* + * + * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + +/** + * pl111_clcd_ext.h + * Extended CLCD register definitions + */ + +#ifndef PL111_CLCD_EXT_H_ +#define PL111_CLCD_EXT_H_ + +/* + * PL111 cursor register definitions not defined in the kernel's clcd header. + * + * TODO MIDEGL-1718: move to include/linux/amba/clcd.h + */ + +#define CLCD_CRSR_IMAGE 0x00000800 +#define CLCD_CRSR_IMAGE_MAX_WORDS 256 +#define CLCD_CRSR_IMAGE_WORDS_PER_LINE 4 +#define CLCD_CRSR_IMAGE_PIXELS_PER_WORD 16 + +#define CLCD_CRSR_LBBP_COLOR_MASK 0x00000003 +#define CLCD_CRSR_LBBP_BACKGROUND 0x0 +#define CLCD_CRSR_LBBP_FOREGROUND 0x1 +#define CLCD_CRSR_LBBP_TRANSPARENT 0x2 +#define CLCD_CRSR_LBBP_INVERSE 0x3 + + +#define CLCD_CRSR_CTRL 0x00000c00 +#define CLCD_CRSR_CONFIG 0x00000c04 +#define CLCD_CRSR_PALETTE_0 0x00000c08 +#define CLCD_CRSR_PALETTE_1 0x00000c0c +#define CLCD_CRSR_XY 0x00000c10 +#define CLCD_CRSR_CLIP 0x00000c14 +#define CLCD_CRSR_IMSC 0x00000c20 +#define CLCD_CRSR_ICR 0x00000c24 +#define CLCD_CRSR_RIS 0x00000c28 +#define CLCD_MIS 0x00000c2c + +#define CRSR_CTRL_CRSR_ON (1 << 0) +#define CRSR_CTRL_CRSR_MAX 3 +#define CRSR_CTRL_CRSR_NUM_SHIFT 4 +#define CRSR_CTRL_CRSR_NUM_MASK \ + (CRSR_CTRL_CRSR_MAX << CRSR_CTRL_CRSR_NUM_SHIFT) +#define CRSR_CTRL_CURSOR_0 0 +#define CRSR_CTRL_CURSOR_1 1 +#define CRSR_CTRL_CURSOR_2 2 +#define CRSR_CTRL_CURSOR_3 3 + +#define CRSR_CONFIG_CRSR_SIZE (1 << 0) +#define CRSR_CONFIG_CRSR_FRAME_SYNC (1 << 1) + +#define CRSR_PALETTE_RED_SHIFT 0 +#define CRSR_PALETTE_GREEN_SHIFT 8 +#define CRSR_PALETTE_BLUE_SHIFT 16 + +#define CRSR_PALETTE_RED_MASK 
0x000000ff +#define CRSR_PALETTE_GREEN_MASK 0x0000ff00 +#define CRSR_PALETTE_BLUE_MASK 0x00ff0000 +#define CRSR_PALETTE_MASK (~0xff000000) + +#define CRSR_XY_MASK 0x000003ff +#define CRSR_XY_X_SHIFT 0 +#define CRSR_XY_Y_SHIFT 16 + +#define CRSR_XY_X_MASK CRSR_XY_MASK +#define CRSR_XY_Y_MASK (CRSR_XY_MASK << CRSR_XY_Y_SHIFT) + +#define CRSR_CLIP_MASK 0x3f +#define CRSR_CLIP_X_SHIFT 0 +#define CRSR_CLIP_Y_SHIFT 8 + +#define CRSR_CLIP_X_MASK CRSR_CLIP_MASK +#define CRSR_CLIP_Y_MASK (CRSR_CLIP_MASK << CRSR_CLIP_Y_SHIFT) + +#define CRSR_IMSC_CRSR_IM (1<<0) +#define CRSR_ICR_CRSR_IC (1<<0) +#define CRSR_RIS_CRSR_RIS (1<<0) +#define CRSR_MIS_CRSR_MIS (1<<0) + +#endif /* PL111_CLCD_EXT_H_ */ diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm.h b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm.h new file mode 100755 index 0000000..64d87b6 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm.h @@ -0,0 +1,270 @@ +/* + * + * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _PL111_DRM_H_ +#define _PL111_DRM_H_ + +#define DRIVER_AUTHOR "ARM Ltd." +#define DRIVER_NAME "pl111_drm" +#define DRIVER_DESC "DRM module for PL111" +#define DRIVER_LICENCE "GPL" +#define DRIVER_ALIAS "platform:pl111_drm" +#define DRIVER_DATE "20101111" +#define DRIVER_VERSION "0.2" +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 1 + +/* + * Number of flips allowed in flight at any one time. 
Any more flips requested + * beyond this value will cause the caller to block until earlier flips have + * completed. + * + * For performance reasons, this must be greater than the number of buffers + * used in the rendering pipeline. Note that the rendering pipeline can contain + * different types of buffer, e.g.: + * - 2 final framebuffers + * - >2 geometry buffers for GPU use-cases + * - >2 vertex buffers for GPU use-cases + * + * For example, a system using 5 geometry buffers could have 5 flips in flight, + * and so NR_FLIPS_IN_FLIGHT_THRESHOLD must be 5 or greater. + * + * Whilst there may be more intermediate buffers (such as vertex/geometry) than + * final framebuffers, KDS is used to ensure that GPU rendering waits for the + * next off-screen buffer, so it doesn't overwrite an on-screen buffer and + * produce tearing. + */ + +/* + * Here, we choose a conservative value. A lower value is most likely + * suitable for GPU use-cases. + */ +#define NR_FLIPS_IN_FLIGHT_THRESHOLD 16 + +#define CLCD_IRQ_NEXTBASE_UPDATE (1u<<2) + +struct pl111_drm_flip_resource; + +struct pl111_gem_bo_dma { + dma_addr_t fb_dev_addr; + void *fb_cpu_addr; +}; + +struct pl111_gem_bo_shm { + struct page **pages; + dma_addr_t *dma_addrs; +}; + +struct pl111_gem_bo { + struct drm_gem_object gem_object; + u32 type; + union { + struct pl111_gem_bo_dma dma; + struct pl111_gem_bo_shm shm; + } backing_data; + struct sg_table *sgt; +}; + +extern struct pl111_drm_dev_private priv; + +struct pl111_drm_framebuffer { + struct drm_framebuffer fb; + struct pl111_gem_bo *bo; +}; + +struct pl111_drm_flip_resource { +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + /* This is the kds set associated to the dma_buf we want to flip */ + struct kds_resource_set *kds_res_set; +#endif + struct drm_framebuffer *fb; + struct drm_crtc *crtc; + struct list_head link; + bool page_flip; + struct drm_pending_vblank_event *event; +}; + +struct pl111_drm_crtc { + struct drm_crtc crtc; + int crtc_index; + +#ifdef 
CONFIG_DMA_SHARED_BUFFER_USES_KDS + /* This protects "old_kds_res_set" and "displaying_fb" */ + spinlock_t current_displaying_lock; + /* + * When a buffer is displayed its associated kds resource + * will be obtained and stored here. Every time a buffer + * flip is completed this old kds set is released and assigned + * the kds set of the new buffer. + */ + struct kds_resource_set *old_kds_res_set; + /* + * Stores which frame buffer is currently being displayed by + * this CRTC or NULL if nothing is being displayed. It is used + * to tell whether we need to obtain a set of kds resources for + * exported buffer objects. + */ + struct drm_framebuffer *displaying_fb; +#endif + struct drm_display_mode *new_mode; + struct drm_display_mode *current_mode; + int last_bpp; + + /* + * This spinlock protects "update_queue", "current_update_res" + * and calls to do_flip_to_res() which updates the CLCD base + * registers. + */ + spinlock_t base_update_lock; + /* + * The resource that caused a base address update. 
Only one can be + * pending, hence it's != NULL if there's a pending update + */ + struct pl111_drm_flip_resource *current_update_res; + /* Queue of things waiting to update the base address */ + struct list_head update_queue; + + void (*show_framebuffer_cb)(struct pl111_drm_flip_resource *flip_res, + struct drm_framebuffer *fb); +}; + +struct pl111_drm_connector { + struct drm_connector connector; +}; + +struct pl111_drm_encoder { + struct drm_encoder encoder; +}; + +struct pl111_drm_dev_private { + struct pl111_drm_crtc *pl111_crtc; + + struct amba_device *amba_dev; + unsigned long mmio_start; + __u32 mmio_len; + void *regs; + struct clk *clk; +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + struct kds_callback kds_cb; + struct kds_callback kds_obtain_current_cb; +#endif + /* + * Number of flips that were started in show_framebuffer_on_crtc(), + * but haven't completed yet - because we do deferred flipping + */ + atomic_t nr_flips_in_flight; + wait_queue_head_t wait_for_flips; + + /* + * Used to prevent race between pl111_dma_buf_release and + * drm_gem_prime_handle_to_fd + */ + struct mutex export_dma_buf_lock; + + uint32_t number_crtcs; + + /* Cache for flip resources used to avoid kmalloc on each page flip */ + struct kmem_cache *page_flip_slab; +}; + +enum pl111_cursor_size { + CURSOR_32X32, + CURSOR_64X64 +}; + +enum pl111_cursor_sync { + CURSOR_SYNC_NONE, + CURSOR_SYNC_VSYNC +}; + + +/** + * Buffer allocation function which is more flexible than dumb_create(), + * it allows passing driver specific flags to control the kind of buffer + * to be allocated. 
+ */ +int pl111_drm_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/****** TODO MIDEGL-1718: this should be moved to uapi/include/drm/pl111_drm.h ********/ + +/* + * Parameters for different buffer objects: + * bit [0]: backing storage + * (0 -> SHM) + * (1 -> DMA) + * bit [2:1]: kind of mapping + * (0x0 -> uncached) + * (0x1 -> write combine) + * (0x2 -> cached) + */ +#define PL111_BOT_MASK (0x7) +#define PL111_BOT_SHM (0x0 << 0) +#define PL111_BOT_DMA (0x1 << 0) +#define PL111_BOT_UNCACHED (0x0 << 1) +#define PL111_BOT_WC (0x1 << 1) +#define PL111_BOT_CACHED (0x2 << 1) + +/** + * User-desired buffer creation information structure. + * + * @size: user-desired memory allocation size. + * - this size value would be page-aligned internally. + * @flags: user request for setting memory type or cache attributes as a bit op + * - PL111_BOT_DMA / PL111_BOT_SHM + * - PL111_BOT_UNCACHED / PL111_BOT_WC / PL111_BOT_CACHED + * @handle: returned a handle to created gem object. + * - this handle will be set by gem module of kernel side. 
+ */ +struct drm_pl111_gem_create { + uint32_t height; + uint32_t width; + uint32_t bpp; + uint32_t flags; + /* handle, pitch, size will be returned */ + uint32_t handle; + uint32_t pitch; + uint64_t size; +}; + +#define DRM_PL111_GEM_CREATE 0x00 + +#define DRM_IOCTL_PL111_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_PL111_GEM_CREATE, struct drm_pl111_gem_create) +/****************************************************************************/ + +#define PL111_FB_FROM_FRAMEBUFFER(drm_fb) \ + (container_of(drm_fb, struct pl111_drm_framebuffer, fb)) + +#define PL111_BO_FROM_FRAMEBUFFER(drm_fb) \ + (container_of(drm_fb, struct pl111_drm_framebuffer, fb)->bo) + +#define PL111_BO_FROM_GEM(gem_obj) \ + container_of(gem_obj, struct pl111_gem_bo, gem_object) + +#define to_pl111_crtc(x) container_of(x, struct pl111_drm_crtc, crtc) + +#define PL111_ENCODER_FROM_ENCODER(x) \ + container_of(x, struct pl111_drm_encoder, encoder) + +#define PL111_CONNECTOR_FROM_CONNECTOR(x) \ + container_of(x, struct pl111_drm_connector, connector) + +#include "pl111_drm_funcs.h" + +#endif /* _PL111_DRM_H_ */ diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_connector.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_connector.c new file mode 100755 index 0000000..c7c3a22 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_connector.c @@ -0,0 +1,170 @@ +/* + * + * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_connector.c + * Implementation of the connector functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "pl111_drm.h" + + +static struct { + int w, h, type; +} pl111_drm_modes[] = { + { 640, 480, DRM_MODE_TYPE_PREFERRED}, + { 800, 600, 0}, + {1024, 768, 0}, + { -1, -1, -1} +}; + +void pl111_connector_destroy(struct drm_connector *connector) +{ + struct pl111_drm_connector *pl111_connector = + PL111_CONNECTOR_FROM_CONNECTOR(connector); + + DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); + + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(pl111_connector); +} + +enum drm_connector_status pl111_connector_detect(struct drm_connector + *connector, bool force) +{ + DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); + return connector_status_connected; +} + +void pl111_connector_dpms(struct drm_connector *connector, int mode) +{ + DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); +} + +struct drm_encoder * +pl111_connector_helper_best_encoder(struct drm_connector *connector) +{ + DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); + + if (connector->encoder != NULL) { + return connector->encoder; /* Return attached encoder */ + } else { + /* + * If there is no attached encoder we choose the best candidate + * from the list. + * For PL111 there is only one encoder so we return the first + * one we find. + * Other h/w would require a suitable criterion below. + */ + struct drm_encoder *encoder = NULL; + struct drm_device *dev = connector->dev; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + head) { + if (1) { /* criterion ? 
*/ + break; + } + } + return encoder; /* return best candidate encoder */ + } +} + +int pl111_connector_helper_get_modes(struct drm_connector *connector) +{ + int i = 0; + int count = 0; + + DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); + + while (pl111_drm_modes[i].w != -1) { + struct drm_display_mode *mode = + drm_mode_find_dmt(connector->dev, + pl111_drm_modes[i].w, + pl111_drm_modes[i].h, + 60 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + , false +#endif + ); + + if (mode != NULL) { + mode->type |= pl111_drm_modes[i].type; + drm_mode_probed_add(connector, mode); + count++; + } + + i++; + } + + DRM_DEBUG_KMS("found %d modes\n", count); + + return count; +} + +int pl111_connector_helper_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); + return MODE_OK; +} + +const struct drm_connector_funcs connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = pl111_connector_destroy, + .detect = pl111_connector_detect, + .dpms = pl111_connector_dpms, +}; + +const struct drm_connector_helper_funcs connector_helper_funcs = { + .get_modes = pl111_connector_helper_get_modes, + .mode_valid = pl111_connector_helper_mode_valid, + .best_encoder = pl111_connector_helper_best_encoder, +}; + +struct pl111_drm_connector *pl111_connector_create(struct drm_device *dev) +{ + struct pl111_drm_connector *pl111_connector; + + pl111_connector = kzalloc(sizeof(struct pl111_drm_connector), + GFP_KERNEL); + + if (pl111_connector == NULL) { + pr_err("Failed to allocated pl111_drm_connector\n"); + return NULL; + } + + drm_connector_init(dev, &pl111_connector->connector, &connector_funcs, + DRM_MODE_CONNECTOR_DVII); + + drm_connector_helper_add(&pl111_connector->connector, + &connector_helper_funcs); + + drm_sysfs_connector_add(&pl111_connector->connector); + + return pl111_connector; +} + diff --git 
a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_crtc.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_crtc.c new file mode 100755 index 0000000..ede07ff --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_crtc.c @@ -0,0 +1,449 @@ +/* + * + * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * pl111_drm_crtc.c + * Implementation of the CRTC functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "pl111_drm.h" + +static int pl111_crtc_num; + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) +#define export_dma_buf export_dma_buf +#else +#define export_dma_buf dma_buf +#endif + +void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc) +{ + struct drm_device *dev = pl111_crtc->crtc.dev; + struct pl111_drm_flip_resource *old_flip_res; + struct pl111_gem_bo *bo; + unsigned long irq_flags; + int flips_in_flight; +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + unsigned long flags; +#endif + + spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags); + + /* + * Cache the flip resource that caused the IRQ since it will be + * dispatched later. Early return if the IRQ isn't associated to + * a base register update. + * + * TODO MIDBASE-2790: disable IRQs when a flip is not pending. 
+ */ + old_flip_res = pl111_crtc->current_update_res; + if (!old_flip_res) { + spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags); + return; + } + pl111_crtc->current_update_res = NULL; + + /* Prepare the next flip (if any) of the queue as soon as possible. */ + if (!list_empty(&pl111_crtc->update_queue)) { + struct pl111_drm_flip_resource *flip_res; + /* Remove the head of the list */ + flip_res = list_first_entry(&pl111_crtc->update_queue, + struct pl111_drm_flip_resource, link); + list_del(&flip_res->link); + do_flip_to_res(flip_res); + /* + * current_update_res will be set, so guarentees that + * another flip_res coming in gets queued instead of + * handled immediately + */ + } + spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags); + + /* Finalize properly the flip that caused the IRQ */ + DRM_DEBUG_KMS("DRM Finalizing old_flip_res=%p\n", old_flip_res); + + bo = PL111_BO_FROM_FRAMEBUFFER(old_flip_res->fb); +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); + release_kds_resource_and_display(old_flip_res); + spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); +#endif + /* Release DMA buffer on this flip */ + + if (bo->gem_object.export_dma_buf != NULL) + dma_buf_put(bo->gem_object.export_dma_buf); + + drm_handle_vblank(dev, pl111_crtc->crtc_index); + + /* Wake up any processes waiting for page flip event */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + if (old_flip_res->event) { + spin_lock_bh(&dev->event_lock); + drm_send_vblank_event(dev, pl111_crtc->crtc_index, + old_flip_res->event); + spin_unlock_bh(&dev->event_lock); + } +#else + if (old_flip_res->event) { + struct drm_pending_vblank_event *e = old_flip_res->event; + struct timeval now; + unsigned int seq; + + DRM_DEBUG_KMS("%s: wake up page flip event (%p)\n", __func__, + old_flip_res->event); + + spin_lock_bh(&dev->event_lock); + seq = drm_vblank_count_and_time(dev, pl111_crtc->crtc_index, + 
&now); + e->pipe = pl111_crtc->crtc_index; + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + + list_add_tail(&e->base.link, + &e->base.file_priv->event_list); + + wake_up_interruptible(&e->base.file_priv->event_wait); + spin_unlock_bh(&dev->event_lock); + } +#endif + + drm_vblank_put(dev, pl111_crtc->crtc_index); + + /* + * workqueue.c:process_one_work(): + * "It is permissible to free the struct work_struct from + * inside the function that is called from it" + */ + kmem_cache_free(priv.page_flip_slab, old_flip_res); + + flips_in_flight = atomic_dec_return(&priv.nr_flips_in_flight); + if (flips_in_flight == 0 || + flips_in_flight == (NR_FLIPS_IN_FLIGHT_THRESHOLD - 1)) + wake_up(&priv.wait_for_flips); + + DRM_DEBUG_KMS("DRM release flip_res=%p\n", old_flip_res); +} + +void show_framebuffer_on_crtc_cb(void *cb1, void *cb2) +{ + struct pl111_drm_flip_resource *flip_res = cb1; + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); + + pl111_crtc->show_framebuffer_cb(cb1, cb2); +} + +int show_framebuffer_on_crtc(struct drm_crtc *crtc, + struct drm_framebuffer *fb, bool page_flip, + struct drm_pending_vblank_event *event) +{ + struct pl111_gem_bo *bo; + struct pl111_drm_flip_resource *flip_res; + int flips_in_flight; + int old_flips_in_flight; + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) + crtc->fb = fb; +#else + crtc->primary->fb = fb; +#endif + + bo = PL111_BO_FROM_FRAMEBUFFER(fb); + if (bo == NULL) { + DRM_DEBUG_KMS("Failed to get pl111_gem_bo object\n"); + return -EINVAL; + } + + /* If this is a full modeset, wait for all outstanding flips to complete + * before continuing. This avoids unnecessary complication from being + * able to queue up multiple modesets and queues of mixed modesets and + * page flips. + * + * Modesets should be uncommon and will not be performant anyway, so + * making them synchronous should have negligible performance impact. 
+ */ + if (!page_flip) { + int ret = wait_event_killable(priv.wait_for_flips, + atomic_read(&priv.nr_flips_in_flight) == 0); + if (ret) + return ret; + } + + /* + * There can be more 'early display' flips in flight than there are + * buffers, and there is (currently) no explicit bound on the number of + * flips. Hence, we need a new allocation for each one. + * + * Note: this could be optimized down if we knew a bound on the flips, + * since an application can only have so many buffers in flight to be + * useful/not hog all the memory + */ + flip_res = kmem_cache_alloc(priv.page_flip_slab, GFP_KERNEL); + if (flip_res == NULL) { + pr_err("kmem_cache_alloc failed to alloc - flip ignored\n"); + return -ENOMEM; + } + + /* + * increment flips in flight, whilst blocking when we reach + * NR_FLIPS_IN_FLIGHT_THRESHOLD + */ + do { + /* + * Note: use of assign-and-then-compare in the condition to set + * flips_in_flight + */ + int ret = wait_event_killable(priv.wait_for_flips, + (flips_in_flight = + atomic_read(&priv.nr_flips_in_flight)) + < NR_FLIPS_IN_FLIGHT_THRESHOLD); + if (ret != 0) { + kmem_cache_free(priv.page_flip_slab, flip_res); + return ret; + } + + old_flips_in_flight = atomic_cmpxchg(&priv.nr_flips_in_flight, + flips_in_flight, flips_in_flight + 1); + } while (old_flips_in_flight != flips_in_flight); + + flip_res->fb = fb; + flip_res->crtc = crtc; + flip_res->page_flip = page_flip; + flip_res->event = event; + INIT_LIST_HEAD(&flip_res->link); + DRM_DEBUG_KMS("DRM alloc flip_res=%p\n", flip_res); +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + if (bo->gem_object.export_dma_buf != NULL) { + struct dma_buf *buf = bo->gem_object.export_dma_buf; + unsigned long shared[1] = { 0 }; + struct kds_resource *resource_list[1] = { + get_dma_buf_kds_resource(buf) }; + int err; + + get_dma_buf(buf); + DRM_DEBUG_KMS("Got dma_buf %p\n", buf); + + /* Wait for the KDS resource associated with this buffer */ + err = kds_async_waitall(&flip_res->kds_res_set, + &priv.kds_cb, flip_res, 
fb, 1, shared, + resource_list); + BUG_ON(err); + } else { + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); + + DRM_DEBUG_KMS("No dma_buf for this flip\n"); + + /* No dma-buf attached so just call the callback directly */ + flip_res->kds_res_set = NULL; + pl111_crtc->show_framebuffer_cb(flip_res, fb); + } +#else + if (bo->gem_object.export_dma_buf != NULL) { + struct dma_buf *buf = bo->gem_object.export_dma_buf; + + get_dma_buf(buf); + DRM_DEBUG_KMS("Got dma_buf %p\n", buf); + } else { + DRM_DEBUG_KMS("No dma_buf for this flip\n"); + } + + /* No dma-buf attached to this so just call the callback directly */ + { + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); + pl111_crtc->show_framebuffer_cb(flip_res, fb); + } +#endif + + /* For the same reasons as the wait at the start of this function, + * wait for the modeset to complete before continuing. + */ + if (!page_flip) { + int ret = wait_event_killable(priv.wait_for_flips, + flips_in_flight == 0); + if (ret) + return ret; + } + + return 0; +} + +int pl111_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) + struct drm_pending_vblank_event *event) +#else + struct drm_pending_vblank_event *event, + uint32_t flags) +#endif +{ + DRM_DEBUG_KMS("%s: crtc=%p, fb=%p, event=%p\n", + __func__, crtc, fb, event); + return show_framebuffer_on_crtc(crtc, fb, true, event); +} + +int pl111_crtc_helper_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + int ret; + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); + struct drm_display_mode *duplicated_mode; + + DRM_DEBUG_KMS("DRM crtc_helper_mode_set, x=%d y=%d bpp=%d\n", + adjusted_mode->hdisplay, adjusted_mode->vdisplay, +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) + crtc->fb->bits_per_pixel); +#else + crtc->primary->fb->bits_per_pixel); +#endif + + duplicated_mode = 
drm_mode_duplicate(crtc->dev, adjusted_mode); + if (!duplicated_mode) + return -ENOMEM; + + pl111_crtc->new_mode = duplicated_mode; +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) + ret = show_framebuffer_on_crtc(crtc, crtc->fb, false, NULL); +#else + ret = show_framebuffer_on_crtc(crtc, crtc->primary->fb, false, NULL); +#endif + if (ret != 0) { + pl111_crtc->new_mode = pl111_crtc->current_mode; + drm_mode_destroy(crtc->dev, duplicated_mode); + } + + return ret; +} + +void pl111_crtc_helper_prepare(struct drm_crtc *crtc) +{ + DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); +} + +void pl111_crtc_helper_commit(struct drm_crtc *crtc) +{ + DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); +} + +bool pl111_crtc_helper_mode_fixup(struct drm_crtc *crtc, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + const struct drm_display_mode *mode, +#else + struct drm_display_mode *mode, +#endif + struct drm_display_mode *adjusted_mode) +{ + DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); + +#ifdef CONFIG_ARCH_VEXPRESS + /* + * 1024x768 with more than 16 bits per pixel may not work + * correctly on Versatile Express due to bandwidth issues + */ + if (mode->hdisplay == 1024 && mode->vdisplay == 768 && +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) + crtc->fb->bits_per_pixel > 16) { +#else + crtc->primary->fb->bits_per_pixel > 16) { +#endif + DRM_INFO("*WARNING* 1024x768 at > 16 bpp may suffer corruption\n"); + } +#endif + + return true; +} + +void pl111_crtc_helper_disable(struct drm_crtc *crtc) +{ + int ret; + + DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); + + /* don't disable crtc until no flips in flight as irq will be disabled */ + ret = wait_event_killable(priv.wait_for_flips, atomic_read(&priv.nr_flips_in_flight) == 0); + if(ret) { + pr_err("pl111_crtc_helper_disable failed\n"); + return; + } + clcd_disable(crtc); +} + +void pl111_crtc_destroy(struct drm_crtc *crtc) +{ + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); + + 
DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); + + drm_crtc_cleanup(crtc); + kfree(pl111_crtc); +} + +const struct drm_crtc_funcs crtc_funcs = { + .cursor_set = pl111_crtc_cursor_set, + .cursor_move = pl111_crtc_cursor_move, + .set_config = drm_crtc_helper_set_config, + .page_flip = pl111_crtc_page_flip, + .destroy = pl111_crtc_destroy +}; + +const struct drm_crtc_helper_funcs crtc_helper_funcs = { + .mode_set = pl111_crtc_helper_mode_set, + .prepare = pl111_crtc_helper_prepare, + .commit = pl111_crtc_helper_commit, + .mode_fixup = pl111_crtc_helper_mode_fixup, + .disable = pl111_crtc_helper_disable, +}; + +struct pl111_drm_crtc *pl111_crtc_create(struct drm_device *dev) +{ + struct pl111_drm_crtc *pl111_crtc; + + pl111_crtc = kzalloc(sizeof(struct pl111_drm_crtc), GFP_KERNEL); + if (pl111_crtc == NULL) { + pr_err("Failed to allocated pl111_drm_crtc\n"); + return NULL; + } + + drm_crtc_init(dev, &pl111_crtc->crtc, &crtc_funcs); + drm_crtc_helper_add(&pl111_crtc->crtc, &crtc_helper_funcs); + + pl111_crtc->crtc_index = pl111_crtc_num; + pl111_crtc_num++; + pl111_crtc->crtc.enabled = 0; + pl111_crtc->last_bpp = 0; + pl111_crtc->current_update_res = NULL; +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + pl111_crtc->displaying_fb = NULL; + pl111_crtc->old_kds_res_set = NULL; + spin_lock_init(&pl111_crtc->current_displaying_lock); +#endif + pl111_crtc->show_framebuffer_cb = show_framebuffer_on_crtc_cb_internal; + INIT_LIST_HEAD(&pl111_crtc->update_queue); + spin_lock_init(&pl111_crtc->base_update_lock); + + return pl111_crtc; +} + diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_cursor.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_cursor.c new file mode 100755 index 0000000..4bf20fe --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_cursor.c @@ -0,0 +1,331 @@ +/* + * + * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * pl111_drm_cursor.c + * Implementation of cursor functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include "pl111_clcd_ext.h" +#include "pl111_drm.h" + +#define PL111_MAX_CURSOR_WIDTH (64) +#define PL111_MAX_CURSOR_HEIGHT (64) + +#define ARGB_2_LBBP_BINARY_THRESHOLD (1 << 7) +#define ARGB_ALPHA_SHIFT 24 +#define ARGB_ALPHA_MASK (0xff << ARGB_ALPHA_SHIFT) +#define ARGB_RED_SHIFT 16 +#define ARGB_RED_MASK (0xff << ARGB_RED_SHIFT) +#define ARGB_GREEN_SHIFT 8 +#define ARGB_GREEN_MASK (0xff << ARGB_GREEN_SHIFT) +#define ARGB_BLUE_SHIFT 0 +#define ARGB_BLUE_MASK (0xff << ARGB_BLUE_SHIFT) + + +void pl111_set_cursor_size(enum pl111_cursor_size size) +{ + u32 reg_data = readl(priv.regs + CLCD_CRSR_CONFIG); + + if (size == CURSOR_64X64) + reg_data |= CRSR_CONFIG_CRSR_SIZE; + else + reg_data &= ~CRSR_CONFIG_CRSR_SIZE; + + writel(reg_data, priv.regs + CLCD_CRSR_CONFIG); +} + +void pl111_set_cursor_sync(enum pl111_cursor_sync sync) +{ + u32 reg_data = readl(priv.regs + CLCD_CRSR_CONFIG); + + if (sync == CURSOR_SYNC_VSYNC) + reg_data |= CRSR_CONFIG_CRSR_FRAME_SYNC; + else + reg_data &= ~CRSR_CONFIG_CRSR_FRAME_SYNC; + + writel(reg_data, priv.regs + CLCD_CRSR_CONFIG); +} + +void pl111_set_cursor(u32 cursor) +{ + u32 reg_data = readl(priv.regs + CLCD_CRSR_CTRL); + + reg_data &= ~(CRSR_CTRL_CRSR_MAX << 
CRSR_CTRL_CRSR_NUM_SHIFT); + reg_data |= (cursor & CRSR_CTRL_CRSR_MAX) << CRSR_CTRL_CRSR_NUM_SHIFT; + + writel(reg_data, priv.regs + CLCD_CRSR_CTRL); +} + +void pl111_set_cursor_enable(bool enable) +{ + u32 reg_data = readl(priv.regs + CLCD_CRSR_CTRL); + + if (enable) + reg_data |= CRSR_CTRL_CRSR_ON; + else + reg_data &= ~CRSR_CTRL_CRSR_ON; + + writel(reg_data, priv.regs + CLCD_CRSR_CTRL); +} + +void pl111_set_cursor_position(u32 x, u32 y) +{ + u32 reg_data = (x & CRSR_XY_MASK) | + ((y & CRSR_XY_MASK) << CRSR_XY_Y_SHIFT); + + writel(reg_data, priv.regs + CLCD_CRSR_XY); +} + +void pl111_set_cursor_clipping(u32 x, u32 y) +{ + u32 reg_data; + + /* + * Do not allow setting clipping values larger than + * the cursor size since the cursor is already fully hidden + * when x,y = PL111_MAX_CURSOR_WIDTH. + */ + if (x > PL111_MAX_CURSOR_WIDTH) + x = PL111_MAX_CURSOR_WIDTH; + if (y > PL111_MAX_CURSOR_WIDTH) + y = PL111_MAX_CURSOR_WIDTH; + + reg_data = (x & CRSR_CLIP_MASK) | + ((y & CRSR_CLIP_MASK) << CRSR_CLIP_Y_SHIFT); + + writel(reg_data, priv.regs + CLCD_CRSR_CLIP); +} + +void pl111_set_cursor_palette(u32 color0, u32 color1) +{ + writel(color0 & CRSR_PALETTE_MASK, priv.regs + CLCD_CRSR_PALETTE_0); + writel(color1 & CRSR_PALETTE_MASK, priv.regs + CLCD_CRSR_PALETTE_1); +} + +void pl111_cursor_enable(void) +{ + pl111_set_cursor_sync(CURSOR_SYNC_VSYNC); + pl111_set_cursor_size(CURSOR_64X64); + pl111_set_cursor_palette(0x0, 0x00ffffff); + pl111_set_cursor_enable(true); +} + +void pl111_cursor_disable(void) +{ + pl111_set_cursor_enable(false); +} + +/* shift required to locate pixel into the correct position in + * a cursor LBBP word, indexed by x mod 16. 
+ */ +static const unsigned char +x_mod_16_to_value_shift[CLCD_CRSR_IMAGE_PIXELS_PER_WORD] = { + 6, 4, 2, 0, 14, 12, 10, 8, 22, 20, 18, 16, 30, 28, 26, 24 +}; + +/* Pack the pixel value into its correct position in the buffer as specified + * for LBBP */ +static inline void +set_lbbp_pixel(uint32_t *buffer, unsigned int x, unsigned int y, + uint32_t value) +{ + u32 *cursor_ram = priv.regs + CLCD_CRSR_IMAGE; + uint32_t shift; + uint32_t data; + + shift = x_mod_16_to_value_shift[x % CLCD_CRSR_IMAGE_PIXELS_PER_WORD]; + + /* Get the word containing this pixel */ + cursor_ram = cursor_ram + (x >> CLCD_CRSR_IMAGE_WORDS_PER_LINE) + (y << 2); + + /* Update pixel in cursor RAM */ + data = readl(cursor_ram); + data &= ~(CLCD_CRSR_LBBP_COLOR_MASK << shift); + data |= value << shift; + writel(data, cursor_ram); +} + +static u32 pl111_argb_to_lbbp(u32 argb_pix) +{ + u32 lbbp_pix = CLCD_CRSR_LBBP_TRANSPARENT; + u32 alpha = (argb_pix & ARGB_ALPHA_MASK) >> ARGB_ALPHA_SHIFT; + u32 red = (argb_pix & ARGB_RED_MASK) >> ARGB_RED_SHIFT; + u32 green = (argb_pix & ARGB_GREEN_MASK) >> ARGB_GREEN_SHIFT; + u32 blue = (argb_pix & ARGB_BLUE_MASK) >> ARGB_BLUE_SHIFT; + + /* + * Converting from 8 pixel transparency to binary transparency + * it's the best we can achieve. + */ + if (alpha & ARGB_2_LBBP_BINARY_THRESHOLD) { + u32 gray, max, min; + + /* + * Convert to gray using the lightness method: + * gray = [max(R,G,B) + min(R,G,B)]/2 + */ + min = min(red, green); + min = min(min, blue); + max = max(red, green); + max = max(max, blue); + gray = (min + max) >> 1; /* divide by 2 */ + /* Apply binary threshold to the gray value calculated */ + if (gray & ARGB_2_LBBP_BINARY_THRESHOLD) + lbbp_pix = CLCD_CRSR_LBBP_FOREGROUND; + else + lbbp_pix = CLCD_CRSR_LBBP_BACKGROUND; + } + + return lbbp_pix; +} + +/* + * The PL111 hardware cursor supports only LBBP which is a 2bpp format but + * the cursor format from userspace is ARGB8888 so we need to convert + * to LBBP here. 
+ */ +static void pl111_set_cursor_image(u32 *data) +{ +#ifdef ARGB_LBBP_CONVERSION_DEBUG + /* Add 1 on width to insert trailing NULL */ + char string_cursor[PL111_MAX_CURSOR_WIDTH + 1]; +#endif /* ARGB_LBBP_CONVERSION_DEBUG */ + unsigned int x; + unsigned int y; + + for (y = 0; y < PL111_MAX_CURSOR_HEIGHT; y++) { + for (x = 0; x < PL111_MAX_CURSOR_WIDTH; x++) { + u32 value = pl111_argb_to_lbbp(*data); + +#ifdef ARGB_LBBP_CONVERSION_DEBUG + if (value == CLCD_CRSR_LBBP_TRANSPARENT) + string_cursor[x] = 'T'; + else if (value == CLCD_CRSR_LBBP_FOREGROUND) + string_cursor[x] = 'F'; + else if (value == CLCD_CRSR_LBBP_INVERSE) + string_cursor[x] = 'I'; + else + string_cursor[x] = 'B'; + +#endif /* ARGB_LBBP_CONVERSION_DEBUG */ + set_lbbp_pixel(data, x, y, value); + ++data; + } +#ifdef ARGB_LBBP_CONVERSION_DEBUG + string_cursor[PL111_MAX_CURSOR_WIDTH] = '\0'; + DRM_INFO("%s\n", string_cursor); +#endif /* ARGB_LBBP_CONVERSION_DEBUG */ + } +} + +int pl111_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height) +{ + struct drm_gem_object *obj; + struct pl111_gem_bo *bo; + + DRM_DEBUG_KMS("handle = %u, width = %u, height = %u\n", + handle, width, height); + + if (!handle) { + pl111_cursor_disable(); + return 0; + } + + if ((width != PL111_MAX_CURSOR_WIDTH) || + (height != PL111_MAX_CURSOR_HEIGHT)) + return -EINVAL; + + obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + if (!obj) { + DRM_ERROR("Cannot find cursor object for handle = %d\n", + handle); + return -ENOENT; + } + + /* + * We expect a PL111_MAX_CURSOR_WIDTH x PL111_MAX_CURSOR_HEIGHT + * ARGB888 buffer object in the input. 
+ * + */ + if (obj->size < (PL111_MAX_CURSOR_WIDTH * PL111_MAX_CURSOR_HEIGHT * 4)) { + DRM_ERROR("Cannot set cursor with an obj size = %d\n", + obj->size); + drm_gem_object_unreference_unlocked(obj); + return -EINVAL; + } + + bo = PL111_BO_FROM_GEM(obj); + if (!(bo->type & PL111_BOT_DMA)) { + DRM_ERROR("Tried to set cursor with non DMA backed obj = %p\n", + obj); + drm_gem_object_unreference_unlocked(obj); + return -EINVAL; + } + + pl111_set_cursor_image(bo->backing_data.dma.fb_cpu_addr); + + /* + * Since we copy the contents of the buffer to the HW cursor internal + * memory this GEM object is not needed anymore. + */ + drm_gem_object_unreference_unlocked(obj); + + pl111_cursor_enable(); + + return 0; +} + +int pl111_crtc_cursor_move(struct drm_crtc *crtc, + int x, int y) +{ + int x_clip = 0; + int y_clip = 0; + + DRM_DEBUG("x %d y %d\n", x, y); + + /* + * The cursor image is clipped automatically at the screen limits when + * it extends beyond the screen image to the right or bottom but + * we must clip it using pl111 HW features for negative values. + */ + if (x < 0) { + x_clip = -x; + x = 0; + } + if (y < 0) { + y_clip = -y; + y = 0; + } + + pl111_set_cursor_clipping(x_clip, y_clip); + pl111_set_cursor_position(x, y); + + return 0; +} diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_device.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_device.c new file mode 100755 index 0000000..6619c07 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_device.c @@ -0,0 +1,338 @@ +/* + * + * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * pl111_drm_device.c + * Implementation of the Linux device driver entrypoints for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "pl111_drm.h" + +struct pl111_drm_dev_private priv; + +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS +static void initial_kds_obtained(void *cb1, void *cb2) +{ + wait_queue_head_t *wait = (wait_queue_head_t *) cb1; + bool *cb_has_called = (bool *) cb2; + + *cb_has_called = true; + wake_up(wait); +} + +/* Must be called from within current_displaying_lock spinlock */ +void release_kds_resource_and_display(struct pl111_drm_flip_resource *flip_res) +{ + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); + pl111_crtc->displaying_fb = flip_res->fb; + + /* Release the previous buffer */ + if (pl111_crtc->old_kds_res_set != NULL) { + /* + * Can flip to the same buffer, but must not release the current + * resource set + */ + BUG_ON(pl111_crtc->old_kds_res_set == flip_res->kds_res_set); + kds_resource_set_release(&pl111_crtc->old_kds_res_set); + } + /* Record the current buffer, to release on the next buffer flip */ + pl111_crtc->old_kds_res_set = flip_res->kds_res_set; +} +#endif + +void pl111_drm_preclose(struct drm_device *dev, struct drm_file *file_priv) +{ + DRM_DEBUG_KMS("DRM %s on dev=%p\n", __func__, dev); +} + +void pl111_drm_lastclose(struct drm_device *dev) +{ + DRM_DEBUG_KMS("DRM %s on dev=%p\n", __func__, dev); +} + +/* + * pl111 does not have a proper HW counter for vblank IRQs so enable_vblank + * and disable_vblank are just no op callbacks. 
+ */ +static int pl111_enable_vblank(struct drm_device *dev, int crtc) +{ + DRM_DEBUG_KMS("%s: dev=%p, crtc=%d", __func__, dev, crtc); + return 0; +} + +static void pl111_disable_vblank(struct drm_device *dev, int crtc) +{ + DRM_DEBUG_KMS("%s: dev=%p, crtc=%d", __func__, dev, crtc); +} + +struct drm_mode_config_funcs mode_config_funcs = { + .fb_create = pl111_fb_create, +}; + +static int pl111_modeset_init(struct drm_device *dev) +{ + struct drm_mode_config *mode_config; + struct pl111_drm_dev_private *priv = dev->dev_private; + struct pl111_drm_connector *pl111_connector; + struct pl111_drm_encoder *pl111_encoder; + int ret = 0; + + if (priv == NULL) + return -EINVAL; + + drm_mode_config_init(dev); + mode_config = &dev->mode_config; + mode_config->funcs = &mode_config_funcs; + mode_config->min_width = 1; + mode_config->max_width = 1024; + mode_config->min_height = 1; + mode_config->max_height = 768; + + priv->pl111_crtc = pl111_crtc_create(dev); + if (priv->pl111_crtc == NULL) { + pr_err("Failed to create pl111_drm_crtc\n"); + ret = -ENOMEM; + goto out_config; + } + + priv->number_crtcs = 1; + + pl111_connector = pl111_connector_create(dev); + if (pl111_connector == NULL) { + pr_err("Failed to create pl111_drm_connector\n"); + ret = -ENOMEM; + goto out_config; + } + + pl111_encoder = pl111_encoder_create(dev, 1); + if (pl111_encoder == NULL) { + pr_err("Failed to create pl111_drm_encoder\n"); + ret = -ENOMEM; + goto out_config; + } + + ret = drm_mode_connector_attach_encoder(&pl111_connector->connector, + &pl111_encoder->encoder); + if (ret != 0) { + DRM_ERROR("Failed to attach encoder\n"); + goto out_config; + } + + pl111_connector->connector.encoder = &pl111_encoder->encoder; + + pl111_encoder->encoder.crtc = &priv->pl111_crtc->crtc; + + goto finish; + +out_config: + drm_mode_config_cleanup(dev); +finish: + DRM_DEBUG("%s returned %d\n", __func__, ret); + return ret; +} + +static void pl111_modeset_fini(struct drm_device *dev) +{ + drm_mode_config_cleanup(dev); 
+} + +static int pl111_drm_load(struct drm_device *dev, unsigned long chipset) +{ + int ret = 0; + + pr_info("DRM %s\n", __func__); + + mutex_init(&priv.export_dma_buf_lock); + atomic_set(&priv.nr_flips_in_flight, 0); + init_waitqueue_head(&priv.wait_for_flips); +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + ret = kds_callback_init(&priv.kds_cb, 1, show_framebuffer_on_crtc_cb); + if (ret != 0) { + pr_err("Failed to initialise KDS callback\n"); + goto finish; + } + + ret = kds_callback_init(&priv.kds_obtain_current_cb, 1, + initial_kds_obtained); + if (ret != 0) { + pr_err("Failed to init KDS obtain callback\n"); + kds_callback_term(&priv.kds_cb); + goto finish; + } +#endif + + /* Create a cache for page flips */ + priv.page_flip_slab = kmem_cache_create("page flip slab", + sizeof(struct pl111_drm_flip_resource), 0, 0, NULL); + if (priv.page_flip_slab == NULL) { + DRM_ERROR("Failed to create slab\n"); + ret = -ENOMEM; + goto out_kds_callbacks; + } + + dev->dev_private = &priv; + + ret = pl111_modeset_init(dev); + if (ret != 0) { + pr_err("Failed to init modeset\n"); + goto out_slab; + } + + ret = pl111_device_init(dev); + if (ret != 0) { + DRM_ERROR("Failed to init MMIO and IRQ\n"); + goto out_modeset; + } + + ret = drm_vblank_init(dev, 1); + if (ret != 0) { + DRM_ERROR("Failed to init vblank\n"); + goto out_vblank; + } + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) + platform_set_drvdata(dev->platformdev, dev); +#endif + + goto finish; + +out_vblank: + pl111_device_fini(dev); +out_modeset: + pl111_modeset_fini(dev); +out_slab: + kmem_cache_destroy(priv.page_flip_slab); +out_kds_callbacks: +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + kds_callback_term(&priv.kds_obtain_current_cb); + kds_callback_term(&priv.kds_cb); +#endif +finish: + DRM_DEBUG_KMS("pl111_drm_load returned %d\n", ret); + return ret; +} + +static int pl111_drm_unload(struct drm_device *dev) +{ + pr_info("DRM %s\n", __func__); + + kmem_cache_destroy(priv.page_flip_slab); + + 
drm_vblank_cleanup(dev); + pl111_modeset_fini(dev); + pl111_device_fini(dev); + +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + kds_callback_term(&priv.kds_obtain_current_cb); + kds_callback_term(&priv.kds_cb); +#endif + return 0; +} + +static struct vm_operations_struct pl111_gem_vm_ops = { + .fault = pl111_gem_fault, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +#else + .open = pl111_gem_vm_open, + .close = pl111_gem_vm_close, +#endif +}; + +static const struct file_operations drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = pl111_gem_mmap, + .poll = drm_poll, + .read = drm_read, +}; + +static struct drm_ioctl_desc pl111_ioctls[] = { + DRM_IOCTL_DEF_DRV(PL111_GEM_CREATE, pl111_drm_gem_create_ioctl, + DRM_CONTROL_ALLOW | DRM_UNLOCKED), +}; + +static struct drm_driver driver = { + .driver_features = + DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, + .load = pl111_drm_load, + .unload = pl111_drm_unload, + .context_dtor = NULL, + .preclose = pl111_drm_preclose, + .lastclose = pl111_drm_lastclose, + .suspend = pl111_drm_suspend, + .resume = pl111_drm_resume, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = pl111_enable_vblank, + .disable_vblank = pl111_disable_vblank, + .ioctls = pl111_ioctls, + .fops = &drm_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + .dumb_create = pl111_dumb_create, + .dumb_destroy = pl111_dumb_destroy, + .dumb_map_offset = pl111_dumb_map_offset, + .gem_free_object = pl111_gem_free_object, + .gem_vm_ops = &pl111_gem_vm_ops, + .prime_handle_to_fd = &pl111_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = &pl111_gem_prime_export, + .gem_prime_import = &pl111_gem_prime_import, +}; + +int pl111_drm_init(struct platform_device *dev) +{ + int ret; + 
pr_info("DRM %s\n", __func__); + pr_info("PL111 DRM initialize, driver name: %s, version %d.%d\n", + DRIVER_NAME, DRIVER_MAJOR, DRIVER_MINOR); + driver.num_ioctls = ARRAY_SIZE(pl111_ioctls); + ret = 0; +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 15, 0)) + driver.kdriver.platform_device = dev; +#endif + return drm_platform_init(&driver, dev); + +} + +void pl111_drm_exit(struct platform_device *dev) +{ + pr_info("DRM %s\n", __func__); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0)) + drm_platform_exit(&driver, dev); +#else + drm_put_dev(platform_get_drvdata(dev)); +#endif +} diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_dma_buf.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_dma_buf.c new file mode 100755 index 0000000..1131f46 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_dma_buf.c @@ -0,0 +1,625 @@ +/* + * + * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_dma_buf.c + * Implementation of the dma_buf functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "pl111_drm.h" + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) +#define export_dma_buf export_dma_buf +#else +#define export_dma_buf dma_buf +#endif + +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS +static void obtain_kds_if_currently_displayed(struct drm_device *dev, + struct pl111_gem_bo *bo, + struct dma_buf *dma_buf) +{ + unsigned long shared[1] = { 0 }; + struct kds_resource *resource_list[1]; + struct kds_resource_set *kds_res_set; + struct drm_crtc *crtc; + bool cb_has_called = false; + unsigned long flags; + int err; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); + + DRM_DEBUG_KMS("Obtaining initial KDS res for bo:%p dma_buf:%p\n", + bo, dma_buf); + + resource_list[0] = get_dma_buf_kds_resource(dma_buf); + get_dma_buf(dma_buf); + + /* + * Can't use kds_waitall(), because kbase will be let through due to + * locked ignore' + */ + err = kds_async_waitall(&kds_res_set, + &priv.kds_obtain_current_cb, &wake, + &cb_has_called, 1, shared, resource_list); + BUG_ON(err); + wait_event(wake, cb_has_called == true); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); + spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); + if (pl111_crtc->displaying_fb) { + struct pl111_drm_framebuffer *pl111_fb; + struct drm_framebuffer *fb = pl111_crtc->displaying_fb; + + pl111_fb = PL111_FB_FROM_FRAMEBUFFER(fb); + + if (pl111_fb->bo == bo) { + DRM_DEBUG_KMS("Initial KDS resource for bo %p", bo); + DRM_DEBUG_KMS(" is being displayed, keeping\n"); + /* There shouldn't be a previous buffer to release */ + BUG_ON(pl111_crtc->old_kds_res_set); + + if (kds_res_set == 
NULL) { + err = kds_async_waitall(&kds_res_set, + &priv.kds_obtain_current_cb, + &wake, &cb_has_called, + 1, shared, resource_list); + BUG_ON(err); + wait_event(wake, cb_has_called == true); + } + + /* Current buffer will need releasing on next flip */ + pl111_crtc->old_kds_res_set = kds_res_set; + + /* + * Clear kds_res_set, so a new kds_res_set is allocated + * for additional CRTCs + */ + kds_res_set = NULL; + } + } + spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); + } + + /* kds_res_set will be NULL here if any CRTCs are displaying fb */ + if (kds_res_set != NULL) { + DRM_DEBUG_KMS("Initial KDS resource for bo %p", bo); + DRM_DEBUG_KMS(" not being displayed, discarding\n"); + /* They're not being displayed, release them */ + kds_resource_set_release(&kds_res_set); + } + + dma_buf_put(dma_buf); +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + +static int pl111_dma_buf_mmap(struct dma_buf *buffer, + struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = buffer->priv; + struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); + struct drm_device *dev = obj->dev; + int ret; + + DRM_DEBUG_KMS("DRM %s on dma_buf=%p\n", __func__, buffer); + + mutex_lock(&dev->struct_mutex); + ret = drm_gem_mmap_obj(obj, obj->size, vma); + mutex_unlock(&dev->struct_mutex); + if (ret) + return ret; + + return pl111_bo_mmap(obj, bo, vma, buffer->size); +} + +#else + +static int pl111_dma_buf_mmap(struct dma_buf *buffer, + struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = buffer->priv; + struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); + struct drm_device *dev = obj->dev; + + DRM_DEBUG_KMS("DRM %s on dma_buf=%p\n", __func__, buffer); + + mutex_lock(&dev->struct_mutex); + + /* Check for valid size. 
*/ + if (obj->size < vma->vm_end - vma->vm_start) + return -EINVAL; + + BUG_ON(!dev->driver->gem_vm_ops); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; +#else + vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; +#endif + + vma->vm_ops = dev->driver->gem_vm_ops; + vma->vm_private_data = obj; + + /* Take a ref for this mapping of the object, so that the fault + * handler can dereference the mmap offset's pointer to the object. + * This reference is cleaned up by the corresponding vm_close + * (which should happen whether the vma was created by this call, or + * by a vm_open due to mremap or partial unmap or whatever). + */ + drm_gem_object_reference(obj); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + pl111_drm_vm_open_locked(dev, vma); +#else + drm_vm_open_locked(dev, vma); +#endif + + mutex_unlock(&dev->struct_mutex); + + return pl111_bo_mmap(obj, bo, vma, buffer->size); +} + +#endif /* KERNEL_VERSION */ + +static void pl111_dma_buf_release(struct dma_buf *buf) +{ + /* + * Need to release the dma_buf's reference on the gem object it was + * exported from, and also clear the gem object's export_dma_buf + * pointer to this dma_buf as it no longer exists + */ + struct drm_gem_object *obj = (struct drm_gem_object *)buf->priv; + struct pl111_gem_bo *bo; +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + struct drm_crtc *crtc; + unsigned long flags; +#endif + bo = PL111_BO_FROM_GEM(obj); + + DRM_DEBUG_KMS("Releasing dma_buf %p, drm_gem_obj=%p\n", buf, obj); + +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + list_for_each_entry(crtc, &bo->gem_object.dev->mode_config.crtc_list, + head) { + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); + spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); + if (pl111_crtc->displaying_fb) { + struct pl111_drm_framebuffer *pl111_fb; + struct drm_framebuffer *fb = pl111_crtc->displaying_fb; + + pl111_fb = 
PL111_FB_FROM_FRAMEBUFFER(fb); + if (pl111_fb->bo == bo) { + kds_resource_set_release(&pl111_crtc->old_kds_res_set); + pl111_crtc->old_kds_res_set = NULL; + } + } + spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); + } +#endif + mutex_lock(&priv.export_dma_buf_lock); + + obj->export_dma_buf = NULL; + drm_gem_object_unreference_unlocked(obj); + + mutex_unlock(&priv.export_dma_buf_lock); +} + +static int pl111_dma_buf_attach(struct dma_buf *buf, struct device *dev, + struct dma_buf_attachment *attach) +{ + DRM_DEBUG_KMS("Attaching dma_buf %p to device %p attach=%p\n", buf, + dev, attach); + + attach->priv = dev; + + return 0; +} + +static void pl111_dma_buf_detach(struct dma_buf *buf, + struct dma_buf_attachment *attach) +{ + DRM_DEBUG_KMS("Detaching dma_buf %p attach=%p\n", attach->dmabuf, + attach); +} + +/* Heavily from exynos_drm_dmabuf.c */ +static struct sg_table *pl111_dma_buf_map_dma_buf(struct dma_buf_attachment + *attach, + enum dma_data_direction + direction) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); + struct drm_device *dev = obj->dev; + int size, n_pages, nents; + struct scatterlist *s, *sg; + struct sg_table *sgt; + int ret, i; + + DRM_DEBUG_KMS("Mapping dma_buf %p from attach=%p (bo=%p)\n", attach->dmabuf, + attach, bo); + + /* + * Nothing to do, if we are trying to map a dmabuf that has been imported. + * Just return the existing sgt. 
+ */ + if (obj->import_attach) { + BUG_ON(!bo->sgt); + return bo->sgt; + } + + size = obj->size; + n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (bo->type & PL111_BOT_DMA) { + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_ERROR("Failed to allocate sg_table\n"); + return ERR_PTR(-ENOMEM); + } + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret < 0) { + DRM_ERROR("Failed to allocate page table\n"); + return ERR_PTR(-ENOMEM); + } + sg_dma_len(sgt->sgl) = size; + /* We use DMA coherent mappings for PL111_BOT_DMA so we must + * use the virtual address returned at buffer allocation */ + sg_set_buf(sgt->sgl, bo->backing_data.dma.fb_cpu_addr, size); + sg_dma_address(sgt->sgl) = bo->backing_data.dma.fb_dev_addr; + } else { /* PL111_BOT_SHM */ + struct page **pages; + int pg = 0; + + mutex_lock(&dev->struct_mutex); + pages = get_pages(obj); + if (IS_ERR(pages)) { + dev_err(obj->dev->dev, "could not get pages: %ld\n", + PTR_ERR(pages)); + return ERR_CAST(pages); + } + sgt = drm_prime_pages_to_sg(pages, n_pages); + if (sgt == NULL) + return ERR_PTR(-ENOMEM); + + pl111_gem_sync_to_dma(bo); + + /* + * At this point the pages have been dma-mapped by either + * get_pages() for non cached maps or pl111_gem_sync_to_dma() + * for cached. So the physical addresses can be assigned + * to the sg entries. + * drm_prime_pages_to_sg() may have combined contiguous pages + * into chunks so we assign the physical address of the first + * page of a chunk to the chunk and check that the physical + * addresses of the rest of the pages in that chunk are also + * contiguous. 
+ */ + sg = sgt->sgl; + nents = sgt->nents; + + for_each_sg(sg, s, nents, i) { + int j, n_pages_in_chunk = sg_dma_len(s) >> PAGE_SHIFT; + + sg_dma_address(s) = bo->backing_data.shm.dma_addrs[pg]; + + for (j = pg+1; j < pg+n_pages_in_chunk; j++) { + BUG_ON(bo->backing_data.shm.dma_addrs[j] != + bo->backing_data.shm.dma_addrs[j-1]+PAGE_SIZE); + } + + pg += n_pages_in_chunk; + } + + mutex_unlock(&dev->struct_mutex); + } + bo->sgt = sgt; + return sgt; +} + +static void pl111_dma_buf_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction direction) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); + + DRM_DEBUG_KMS("Unmapping dma_buf %p from attach=%p (bo=%p)\n", attach->dmabuf, + attach, bo); + + sg_free_table(sgt); + kfree(sgt); + bo->sgt = NULL; +} + +/* + * There isn't any operation here that can sleep or fail so this callback can + * be used for both kmap and kmap_atomic implementations. + */ +static void *pl111_dma_buf_kmap(struct dma_buf *dma_buf, unsigned long pageno) +{ + struct pl111_gem_bo *bo = dma_buf->priv; + void *vaddr = NULL; + + /* Make sure we cannot access outside the memory range */ + if (((pageno + 1) << PAGE_SHIFT) > bo->gem_object.size) + return NULL; + + if (bo->type & PL111_BOT_DMA) { + vaddr = (bo->backing_data.dma.fb_cpu_addr + + (pageno << PAGE_SHIFT)); + } else { + vaddr = page_address(bo->backing_data.shm.pages[pageno]); + } + + return vaddr; +} + +/* + * Find a scatterlist that starts in "start" and has "len" + * or return a NULL dma_handle. 
+ */
+static dma_addr_t pl111_find_matching_sg(struct sg_table *sgt, size_t start,
+					 size_t len)
+{
+	struct scatterlist *sg;
+	unsigned int count;
+	size_t size = 0;
+	dma_addr_t dma_handle = 0;
+
+	/* Find a scatterlist that starts in "start" and has "len"
+	 * or return error */
+	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+		if ((size == start) && (len == sg_dma_len(sg))) {
+			dma_handle = sg_dma_address(sg);
+			break;
+		}
+		size += sg_dma_len(sg);
+	}
+	return dma_handle;
+}
+
+static int pl111_dma_buf_begin_cpu(struct dma_buf *dma_buf,
+				   size_t start, size_t len,
+				   enum dma_data_direction dir)
+{
+	struct pl111_gem_bo *bo = dma_buf->priv;
+	struct sg_table *sgt = bo->sgt;
+	dma_addr_t dma_handle;
+
+	if ((start + len) > bo->gem_object.size)
+		return -EINVAL;
+
+	/* BUGFIX: test PL111_BOT_DMA like pl111_dma_buf_end_cpu() below.
+	 * Only the cacheable SHM buffers need a CPU sync; the old
+	 * !(bo->type & PL111_BOT_SHM) check inverted the logic, syncing the
+	 * coherent DMA buffers and skipping the SHM ones. */
+	if (!(bo->type & PL111_BOT_DMA)) {
+		struct device *dev = bo->gem_object.dev->dev;
+
+		dma_handle = pl111_find_matching_sg(sgt, start, len);
+		if (!dma_handle)
+			return -EINVAL;
+
+		dma_sync_single_range_for_cpu(dev, dma_handle, 0, len, dir);
+	}
+	/* PL111_BOT_DMA uses coherents mappings, no need to sync */
+	return 0;
+}
+
+static void pl111_dma_buf_end_cpu(struct dma_buf *dma_buf,
+				  size_t start, size_t len,
+				  enum dma_data_direction dir)
+{
+	struct pl111_gem_bo *bo = dma_buf->priv;
+	struct sg_table *sgt = bo->sgt;
+	dma_addr_t dma_handle;
+
+	if ((start + len) > bo->gem_object.size)
+		return;
+
+	if (!(bo->type & PL111_BOT_DMA)) {
+		struct device *dev = bo->gem_object.dev->dev;
+
+		dma_handle = pl111_find_matching_sg(sgt, start, len);
+		if (!dma_handle)
+			return;
+
+		dma_sync_single_range_for_device(dev, dma_handle, 0, len, dir);
+	}
+	/* PL111_BOT_DMA uses coherents mappings, no need to sync */
+}
+
+static struct dma_buf_ops pl111_dma_buf_ops = {
+	.release = &pl111_dma_buf_release,
+	.attach = &pl111_dma_buf_attach,
+	.detach = &pl111_dma_buf_detach,
+	.map_dma_buf = &pl111_dma_buf_map_dma_buf,
+	.unmap_dma_buf = &pl111_dma_buf_unmap_dma_buf,
+	.kmap_atomic = &pl111_dma_buf_kmap,
+	.kmap =
&pl111_dma_buf_kmap, + .begin_cpu_access = &pl111_dma_buf_begin_cpu, + .end_cpu_access = &pl111_dma_buf_end_cpu, + .mmap = &pl111_dma_buf_mmap, +}; + +struct drm_gem_object *pl111_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct dma_buf_attachment *attachment; + struct drm_gem_object *obj; + struct pl111_gem_bo *bo; + struct scatterlist *sgl; + struct sg_table *sgt; + dma_addr_t cont_phys; + int ret = 0; + int i; + + DRM_DEBUG_KMS("DRM %s on dev=%p dma_buf=%p\n", __func__, dev, dma_buf); + + /* is this one of own objects? */ + if (dma_buf->ops == &pl111_dma_buf_ops) { + obj = dma_buf->priv; + /* is it from our device? */ + if (obj->dev == dev) { + /* + * Importing dmabuf exported from our own gem increases + * refcount on gem itself instead of f_count of dmabuf. + */ + drm_gem_object_reference(obj); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + /* before v3.10.0 we assume the caller has taken a ref on the dma_buf + * we don't want it for self-imported buffers so drop it here */ + dma_buf_put(dma_buf); +#endif + + return obj; + } + } + + attachment = dma_buf_attach(dma_buf, dev->dev); + if (IS_ERR(attachment)) + return ERR_CAST(attachment); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) + /* from 3.10.0 we assume the caller has not taken a ref so we take one here */ + get_dma_buf(dma_buf); +#endif + + sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(sgt)) { + ret = PTR_ERR(sgt); + goto err_buf_detach; + } + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) { + DRM_ERROR("%s: failed to allocate buffer object.\n", __func__); + ret = -ENOMEM; + goto err_unmap_attach; + } + + /* Find out whether the buffer is contiguous or not */ + sgl = sgt->sgl; + cont_phys = sg_phys(sgl); + bo->type |= PL111_BOT_DMA; + for_each_sg(sgt->sgl, sgl, sgt->nents, i) { + dma_addr_t real_phys = sg_phys(sgl); + if (real_phys != cont_phys) { + bo->type &= ~PL111_BOT_DMA; + break; + } + cont_phys += (PAGE_SIZE 
- sgl->offset); + } + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) + ret = drm_gem_private_object_init(dev, &bo->gem_object, + dma_buf->size); + if (ret != 0) { + DRM_ERROR("DRM could not import DMA GEM obj\n"); + goto err_free_buffer; + } +#else + drm_gem_private_object_init(dev, &bo->gem_object, dma_buf->size); +#endif + + if (bo->type & PL111_BOT_DMA) { + bo->backing_data.dma.fb_cpu_addr = sg_virt(sgt->sgl); + bo->backing_data.dma.fb_dev_addr = sg_phys(sgt->sgl); + DRM_DEBUG_KMS("DRM %s pl111_gem_bo=%p, contiguous import\n", __func__, bo); + } else { /* PL111_BOT_SHM */ + DRM_DEBUG_KMS("DRM %s pl111_gem_bo=%p, non contiguous import\n", __func__, bo); + } + + bo->gem_object.import_attach = attachment; + bo->sgt = sgt; + + return &bo->gem_object; + +err_free_buffer: + kfree(bo); + bo = NULL; +err_unmap_attach: + dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL); +err_buf_detach: + dma_buf_detach(dma_buf, attachment); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) + /* from 3.10.0 we will have taken a ref so drop it here */ + dma_buf_put(dma_buf); +#endif + return ERR_PTR(ret); +} + +struct dma_buf *pl111_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, int flags) +{ + struct dma_buf *new_buf; + struct pl111_gem_bo *bo; + size_t size; + + DRM_DEBUG("DRM %s on dev=%p drm_gem_obj=%p\n", __func__, dev, obj); + size = obj->size; + + new_buf = dma_buf_export(obj /*priv */ , &pl111_dma_buf_ops, size, + flags | O_RDWR); + bo = PL111_BO_FROM_GEM(new_buf->priv); + + /* + * bo->gem_object.export_dma_buf not setup until after gem_prime_export + * finishes + */ + +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + /* + * Ensure that we hold the kds resource if it's the currently + * displayed buffer. 
+ */ + obtain_kds_if_currently_displayed(dev, bo, new_buf); +#endif + + DRM_DEBUG("Created dma_buf %p\n", new_buf); + + return new_buf; +} + +int pl111_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, + uint32_t handle, uint32_t flags, int *prime_fd) +{ + int result; + /* + * This will re-use any existing exports, and calls + * driver->gem_prime_export to do the first export when needed + */ + DRM_DEBUG_KMS("DRM %s on file_priv=%p, handle=0x%.8x\n", __func__, + file_priv, handle); + + mutex_lock(&priv.export_dma_buf_lock); + result = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, + prime_fd); + mutex_unlock(&priv.export_dma_buf_lock); + + return result; +} diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_encoder.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_encoder.c new file mode 100755 index 0000000..78c91c0 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_encoder.c @@ -0,0 +1,107 @@ +/* + * + * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_encoder.c + * Implementation of the encoder functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "pl111_drm.h" + +bool pl111_encoder_helper_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); + return true; +} + +void pl111_encoder_helper_prepare(struct drm_encoder *encoder) +{ + DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); +} + +void pl111_encoder_helper_commit(struct drm_encoder *encoder) +{ + DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); +} + +void pl111_encoder_helper_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); +} + +void pl111_encoder_helper_disable(struct drm_encoder *encoder) +{ + DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); +} + +void pl111_encoder_destroy(struct drm_encoder *encoder) +{ + struct pl111_drm_encoder *pl111_encoder = + PL111_ENCODER_FROM_ENCODER(encoder); + + DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); + + drm_encoder_cleanup(encoder); + kfree(pl111_encoder); +} + +const struct drm_encoder_funcs encoder_funcs = { + .destroy = pl111_encoder_destroy, +}; + +const struct drm_encoder_helper_funcs encoder_helper_funcs = { + .mode_fixup = pl111_encoder_helper_mode_fixup, + .prepare = pl111_encoder_helper_prepare, + .commit = pl111_encoder_helper_commit, + .mode_set = pl111_encoder_helper_mode_set, + .disable = pl111_encoder_helper_disable, +}; + +struct pl111_drm_encoder *pl111_encoder_create(struct drm_device *dev, + int possible_crtcs) +{ + struct pl111_drm_encoder *pl111_encoder; + + 
pl111_encoder = kzalloc(sizeof(struct pl111_drm_encoder), GFP_KERNEL); + if (pl111_encoder == NULL) { + pr_err("Failed to allocated pl111_drm_encoder\n"); + return NULL; + } + + drm_encoder_init(dev, &pl111_encoder->encoder, &encoder_funcs, + DRM_MODE_ENCODER_DAC); + + drm_encoder_helper_add(&pl111_encoder->encoder, &encoder_helper_funcs); + + pl111_encoder->encoder.possible_crtcs = possible_crtcs; + + return pl111_encoder; +} + diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_fb.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_fb.c new file mode 100755 index 0000000..f575c9e --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_fb.c @@ -0,0 +1,202 @@ +/* + * + * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_fb.c + * Implementation of the framebuffer functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_crtc.h> +#include "pl111_drm.h" + +static void pl111_fb_destroy(struct drm_framebuffer *framebuffer) +{ + struct pl111_drm_framebuffer *pl111_fb; +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + struct drm_crtc *crtc; + unsigned long flags; +#endif + DRM_DEBUG_KMS("Destroying framebuffer 0x%p...\n", framebuffer); + + pl111_fb = PL111_FB_FROM_FRAMEBUFFER(framebuffer); + + /* + * Because flips are deferred, wait for all previous flips to complete + */ + wait_event(priv.wait_for_flips, + atomic_read(&priv.nr_flips_in_flight) == 0); +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + /* + * Release KDS resources if it's currently being displayed. Only occurs + * when the last framebuffer is destroyed. 
+ */ + list_for_each_entry(crtc, &framebuffer->dev->mode_config.crtc_list, + head) { + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); + spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); + if (pl111_crtc->displaying_fb == framebuffer) { + /* Release the current buffers */ + if (pl111_crtc->old_kds_res_set != NULL) { + DRM_DEBUG_KMS("Releasing KDS resources for "); + DRM_DEBUG_KMS("displayed 0x%p\n", framebuffer); + kds_resource_set_release( + &pl111_crtc->old_kds_res_set); + } + pl111_crtc->old_kds_res_set = NULL; + } + spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, + flags); + } +#endif + drm_framebuffer_cleanup(framebuffer); + + if ((pl111_fb->bo != NULL) && (&pl111_fb->bo->gem_object != NULL)) + drm_gem_object_unreference_unlocked(&pl111_fb->bo->gem_object); + + kfree(pl111_fb); + + DRM_DEBUG_KMS("Destroyed framebuffer 0x%p\n", framebuffer); +} + +static int pl111_fb_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct pl111_gem_bo *bo = PL111_BO_FROM_FRAMEBUFFER(fb); + DRM_DEBUG_KMS("DRM %s on fb=%p\n", __func__, fb); + + if (bo == NULL) + return -EINVAL; + + return drm_gem_handle_create(file_priv, &bo->gem_object, handle); +} + +const struct drm_framebuffer_funcs fb_funcs = { + .destroy = pl111_fb_destroy, + .create_handle = pl111_fb_create_handle, +}; + +struct drm_framebuffer *pl111_fb_create(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct pl111_drm_framebuffer *pl111_fb = NULL; + struct drm_framebuffer *fb = NULL; + struct drm_gem_object *gem_obj; + struct pl111_gem_bo *bo; + int err = 0; + size_t min_size; + int bpp; + int depth; + + pr_info("DRM %s\n", __func__); + gem_obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); + if (gem_obj == NULL) { + DRM_ERROR("Could not get gem obj from handle to create fb\n"); + err = -ENOENT; + goto error; + } + + bo = PL111_BO_FROM_GEM(gem_obj); + 
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); + + if (mode_cmd->pitches[0] < mode_cmd->width * (bpp >> 3)) { + DRM_ERROR("bad pitch %u for plane 0\n", mode_cmd->pitches[0]); + err = -EINVAL; + goto error; + } + + min_size = (mode_cmd->height - 1) * mode_cmd->pitches[0] + + mode_cmd->width * (bpp >> 3); + + if (bo->gem_object.size < min_size) { + DRM_ERROR("gem obj size < min size\n"); + err = -EINVAL; + goto error; + } + + /* We can't scan out SHM so we can't create an fb for it */ + if (!(bo->type & PL111_BOT_DMA)) { + DRM_ERROR("Can't create FB for non-scanout buffer\n"); + err = -EINVAL; + goto error; + } + + switch ((char)(mode_cmd->pixel_format & 0xFF)) { + case 'Y': + case 'U': + case 'V': + case 'N': + case 'T': + DRM_ERROR("YUV formats not supported\n"); + err = -EINVAL; + goto error; + } + + pl111_fb = kzalloc(sizeof(struct pl111_drm_framebuffer), GFP_KERNEL); + if (pl111_fb == NULL) { + DRM_ERROR("Could not allocate pl111_drm_framebuffer\n"); + err = -ENOMEM; + goto error; + } + fb = &pl111_fb->fb; + + err = drm_framebuffer_init(dev, fb, &fb_funcs); + if (err) { + DRM_ERROR("drm_framebuffer_init failed\n"); + kfree(fb); + fb = NULL; + goto error; + } + + drm_helper_mode_fill_fb_struct(fb, mode_cmd); + + /* The only framebuffer formats supported by pl111 + * are 16 bpp or 32 bpp with 24 bit depth. 
+ * See clcd_enable() + */ + if (!((fb->bits_per_pixel == 16) || + (fb->bits_per_pixel == 32 && fb->depth == 24))) { + DRM_DEBUG_KMS("unsupported pixel format bpp=%d, depth=%d\n", fb->bits_per_pixel, fb->depth); + drm_framebuffer_cleanup(fb); + kfree(fb); + fb = NULL; + err = -EINVAL; + goto error; + } + + pl111_fb->bo = bo; + + DRM_DEBUG_KMS("Created fb 0x%p with gem_obj 0x%p physaddr=0x%.8x\n", + fb, gem_obj, bo->backing_data.dma.fb_dev_addr); + + return fb; + +error: + if (gem_obj != NULL) + drm_gem_object_unreference_unlocked(gem_obj); + + return ERR_PTR(err); +} diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_funcs.h b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_funcs.h new file mode 100755 index 0000000..494baa0 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_funcs.h @@ -0,0 +1,130 @@ +/* + * + * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_funcs.h + * Function prototypes for PL111 DRM + */ + +#ifndef PL111_DRM_FUNCS_H_ +#define PL111_DRM_FUNCS_H_ + +/* Platform Initialisation */ +int pl111_drm_init(struct platform_device *dev); +void pl111_drm_exit(struct platform_device *dev); + +/* KDS Callbacks */ +void show_framebuffer_on_crtc_cb(void *cb1, void *cb2); +void release_kds_resource_and_display(struct pl111_drm_flip_resource *flip_res); + +/* CRTC Functions */ +struct pl111_drm_crtc *pl111_crtc_create(struct drm_device *dev); +struct pl111_drm_crtc *pl111_crtc_dummy_create(struct drm_device *dev); +void pl111_crtc_destroy(struct drm_crtc *crtc); + +bool pl111_crtc_is_fb_currently_displayed(struct drm_device *dev, + struct drm_framebuffer *fb); + +int show_framebuffer_on_crtc(struct drm_crtc *crtc, + struct drm_framebuffer *fb, bool page_flip, + struct drm_pending_vblank_event *event); + +/* Common IRQ handler */ +void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc); + +int pl111_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height); +int pl111_crtc_cursor_move(struct drm_crtc *crtc, + int x, int y); + +/* Connector Functions */ +struct pl111_drm_connector *pl111_connector_create(struct drm_device *dev); +void pl111_connector_destroy(struct drm_connector *connector); +struct pl111_drm_connector *pl111_connector_dummy_create(struct drm_device + *dev); + +/* Encoder Functions */ +struct pl111_drm_encoder *pl111_encoder_create(struct drm_device *dev, + int possible_crtcs); +struct pl111_drm_encoder *pl111_encoder_dummy_create(struct drm_device *dev, + int possible_crtcs); +void pl111_encoder_destroy(struct drm_encoder *encoder); + +/* Frame Buffer Functions */ +struct drm_framebuffer *pl111_fb_create(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd2 *mode_cmd); + +/* VMA Functions */ +int pl111_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +int 
pl111_gem_mmap(struct file *file_priv, struct vm_area_struct *vma); +struct page **get_pages(struct drm_gem_object *obj); +void put_pages(struct drm_gem_object *obj, struct page **pages); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +void pl111_drm_vm_open_locked(struct drm_device *dev, + struct vm_area_struct *vma); +void pl111_gem_vm_open(struct vm_area_struct *vma); +void pl111_gem_vm_close(struct vm_area_struct *vma); +#endif + +/* Suspend Functions */ +int pl111_drm_resume(struct drm_device *dev); +int pl111_drm_suspend(struct drm_device *dev, pm_message_t state); + +/* GEM Functions */ +int pl111_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int pl111_dumb_destroy(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle); +int pl111_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); +void pl111_gem_free_object(struct drm_gem_object *obj); + +int pl111_bo_mmap(struct drm_gem_object *obj, struct pl111_gem_bo *bo, + struct vm_area_struct *vma, size_t size); +void pl111_gem_sync_to_cpu(struct pl111_gem_bo *bo, int pgoff); +void pl111_gem_sync_to_dma(struct pl111_gem_bo *bo); + +/* DMA BUF Functions */ +struct drm_gem_object *pl111_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); +int pl111_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, + uint32_t handle, uint32_t flags, int *prime_fd); +struct dma_buf *pl111_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, int flags); + +/* Pl111 Functions */ +void show_framebuffer_on_crtc_cb_internal(struct pl111_drm_flip_resource + *flip_res, struct drm_framebuffer *fb); +int clcd_disable(struct drm_crtc *crtc); +void do_flip_to_res(struct pl111_drm_flip_resource *flip_res); +int pl111_amba_probe(struct amba_device *dev, const struct amba_id *id); +int pl111_amba_remove(struct amba_device *dev); + +int pl111_device_init(struct 
drm_device *dev); +void pl111_device_fini(struct drm_device *dev); + +void pl111_convert_drm_mode_to_timing(struct drm_display_mode *mode, + struct clcd_regs *timing); +void pl111_convert_timing_to_drm_mode(struct clcd_regs *timing, + struct drm_display_mode *mode); +#endif /* PL111_DRM_FUNCS_H_ */ diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_gem.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_gem.c new file mode 100755 index 0000000..13fb256 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_gem.c @@ -0,0 +1,476 @@ +/* + * + * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_gem.c + * Implementation of the GEM functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <asm/cacheflush.h> +#include <asm/outercache.h> +#include "pl111_drm.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) +#include <linux/dma-attrs.h> +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) +#include <drm/drm_vma_manager.h> +#endif + +void pl111_gem_free_object(struct drm_gem_object *obj) +{ + struct pl111_gem_bo *bo; + struct drm_device *dev = obj->dev; + DRM_DEBUG_KMS("DRM %s on drm_gem_object=%p\n", __func__, obj); + + bo = PL111_BO_FROM_GEM(obj); + + if (obj->import_attach) + drm_prime_gem_destroy(obj, bo->sgt); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) + if (obj->map_list.map != NULL) + drm_gem_free_mmap_offset(obj); +#else + drm_gem_free_mmap_offset(obj); +#endif + /* + * Only free the backing memory if the object has not been imported. + * If it has been imported, the exporter is in charge to free that + * once dmabuf's refcount becomes 0. 
+ */ + if (obj->import_attach) + goto imported_out; + + if (bo->type & PL111_BOT_DMA) { + dma_free_writecombine(dev->dev, obj->size, + bo->backing_data.dma.fb_cpu_addr, + bo->backing_data.dma.fb_dev_addr); + } else if (bo->backing_data.shm.pages != NULL) { + put_pages(obj, bo->backing_data.shm.pages); + } + +imported_out: + drm_gem_object_release(obj); + + kfree(bo); + + DRM_DEBUG_KMS("Destroyed dumb_bo handle 0x%p\n", bo); +} + +static int pl111_gem_object_create(struct drm_device *dev, u64 size, + u32 flags, struct drm_file *file_priv, + u32 *handle) +{ + int ret = 0; + struct pl111_gem_bo *bo = NULL; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (bo == NULL) { + ret = -ENOMEM; + goto finish; + } + + bo->type = flags; + +#ifndef ARCH_HAS_SG_CHAIN + /* + * If the ARCH can't chain we can't have non-contiguous allocs larger + * than a single sg can hold. + * In this case we fall back to using contiguous memory + */ + if (!(bo->type & PL111_BOT_DMA)) { + long unsigned int n_pages = + PAGE_ALIGN(size) >> PAGE_SHIFT; + if (n_pages > SG_MAX_SINGLE_ALLOC) { + bo->type |= PL111_BOT_DMA; + /* + * Non-contiguous allocation request changed to + * contigous + */ + DRM_INFO("non-contig alloc to contig %lu > %lu pages.", + n_pages, SG_MAX_SINGLE_ALLOC); + } + } +#endif + if (bo->type & PL111_BOT_DMA) { + /* scanout compatible - use physically contiguous buffer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + bo->backing_data.dma.fb_cpu_addr = + dma_alloc_attrs(dev->dev, size, + &bo->backing_data.dma.fb_dev_addr, + GFP_KERNEL, + &attrs); + if (bo->backing_data.dma.fb_cpu_addr == NULL) { + DRM_ERROR("dma_alloc_attrs failed\n"); + ret = -ENOMEM; + goto free_bo; + } +#else + bo->backing_data.dma.fb_cpu_addr = + dma_alloc_writecombine(dev->dev, size, + &bo->backing_data.dma.fb_dev_addr, + GFP_KERNEL); + if (bo->backing_data.dma.fb_cpu_addr == NULL) { + DRM_ERROR("dma_alloc_writecombine 
failed\n"); + ret = -ENOMEM; + goto free_bo; + } +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) + ret = drm_gem_private_object_init(dev, &bo->gem_object, + size); + if (ret != 0) { + DRM_ERROR("DRM could not initialise GEM object\n"); + goto free_dma; + } +#else + drm_gem_private_object_init(dev, &bo->gem_object, size); +#endif + + } else { /* PL111_BOT_SHM */ + /* not scanout compatible - use SHM backed object */ + ret = drm_gem_object_init(dev, &bo->gem_object, size); + if (ret != 0) { + DRM_ERROR("DRM could not init SHM backed GEM obj\n"); + ret = -ENOMEM; + goto free_bo; + } + DRM_DEBUG_KMS("Num bytes: %d\n", bo->gem_object.size); + } + + DRM_DEBUG("s=%llu, flags=0x%x, %s 0x%.8lx, type=%d\n", + size, flags, + (bo->type & PL111_BOT_DMA) ? "physaddr" : "shared page array", + (bo->type & PL111_BOT_DMA) ? + (unsigned long)bo->backing_data.dma.fb_dev_addr: + (unsigned long)bo->backing_data.shm.pages, + bo->type); + + ret = drm_gem_handle_create(file_priv, &bo->gem_object, handle); + if (ret != 0) { + DRM_ERROR("DRM failed to create GEM handle\n"); + goto obj_release; + } + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(&bo->gem_object); + + return 0; + +obj_release: + drm_gem_object_release(&bo->gem_object); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) +free_dma: +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + if (bo->type & PL111_BOT_DMA) { + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + dma_free_attrs(dev->dev, size, + bo->backing_data.dma.fb_cpu_addr, + bo->backing_data.dma.fb_dev_addr, + &attrs); + } +#else + if (bo->type & PL111_BOT_DMA) + dma_free_writecombine(dev->dev, size, + bo->backing_data.dma.fb_cpu_addr, + bo->backing_data.dma.fb_dev_addr); +#endif +free_bo: + kfree(bo); +finish: + return ret; +} + +int pl111_drm_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_pl111_gem_create 
*args = data; + uint32_t bytes_pp; + + /* Round bpp up, to allow for case where bpp<8 */ + bytes_pp = args->bpp >> 3; + if (args->bpp & ((1 << 3) - 1)) + bytes_pp++; + + if (args->flags & ~PL111_BOT_MASK) { + DRM_ERROR("wrong flags: 0x%x\n", args->flags); + return -EINVAL; + } + + args->pitch = ALIGN(args->width * bytes_pp, 64); + args->size = PAGE_ALIGN(args->pitch * args->height); + + DRM_DEBUG_KMS("gem_create w=%d h=%d p=%d bpp=%d b=%d s=%llu f=0x%x\n", + args->width, args->height, args->pitch, args->bpp, + bytes_pp, args->size, args->flags); + + return pl111_gem_object_create(dev, args->size, args->flags, file_priv, + &args->handle); +} + +int pl111_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + uint32_t bytes_pp; + + /* Round bpp up, to allow for case where bpp<8 */ + bytes_pp = args->bpp >> 3; + if (args->bpp & ((1 << 3) - 1)) + bytes_pp++; + + if (args->flags) { + DRM_ERROR("flags must be zero: 0x%x\n", args->flags); + return -EINVAL; + } + + args->pitch = ALIGN(args->width * bytes_pp, 64); + args->size = PAGE_ALIGN(args->pitch * args->height); + + DRM_DEBUG_KMS("dumb_create w=%d h=%d p=%d bpp=%d b=%d s=%llu f=0x%x\n", + args->width, args->height, args->pitch, args->bpp, + bytes_pp, args->size, args->flags); + + return pl111_gem_object_create(dev, args->size, + PL111_BOT_DMA | PL111_BOT_UNCACHED, + file_priv, &args->handle); +} + +int pl111_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle) +{ + DRM_DEBUG_KMS("DRM %s on file_priv=%p handle=0x%.8x\n", __func__, + file_priv, handle); + return drm_gem_handle_delete(file_priv, handle); +} + +int pl111_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset) +{ + struct drm_gem_object *obj; + int ret = 0; + DRM_DEBUG_KMS("DRM %s on file_priv=%p handle=0x%.8x\n", __func__, + file_priv, handle); + + /* GEM does all our handle to object mapping */ + obj = 
drm_gem_object_lookup(dev, file_priv, handle); + if (obj == NULL) { + ret = -ENOENT; + goto fail; + } + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) + if (obj->map_list.map == NULL) { + ret = drm_gem_create_mmap_offset(obj); + if (ret != 0) { + drm_gem_object_unreference_unlocked(obj); + goto fail; + } + } +#else + ret = drm_gem_create_mmap_offset(obj); + if (ret != 0) { + drm_gem_object_unreference_unlocked(obj); + goto fail; + } +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) + *offset = (uint64_t) obj->map_list.hash.key << PAGE_SHIFT; +#else + *offset = drm_vma_node_offset_addr(&obj->vma_node); + DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); +#endif + + drm_gem_object_unreference_unlocked(obj); +fail: + return ret; +} + +/* sync the buffer for DMA access */ +void pl111_gem_sync_to_dma(struct pl111_gem_bo *bo) +{ + struct drm_device *dev = bo->gem_object.dev; + + if (!(bo->type & PL111_BOT_DMA) && (bo->type & PL111_BOT_CACHED)) { + int i, npages = bo->gem_object.size >> PAGE_SHIFT; + struct page **pages = bo->backing_data.shm.pages; + bool dirty = false; + + for (i = 0; i < npages; i++) { + if (!bo->backing_data.shm.dma_addrs[i]) { + DRM_DEBUG("%s: dma map page=%d bo=%p\n", __func__, i, bo); + bo->backing_data.shm.dma_addrs[i] = + dma_map_page(dev->dev, pages[i], 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + dirty = true; + } + } + + if (dirty) { + DRM_DEBUG("%s: zap ptes (and flush cache) bo=%p\n", __func__, bo); + /* + * TODO MIDEGL-1813 + * + * Use flush_cache_page() and outer_flush_range() to + * flush only the user space mappings of the dirty pages + */ + flush_cache_all(); + outer_flush_all(); + unmap_mapping_range(bo->gem_object.filp->f_mapping, 0, + bo->gem_object.size, 1); + } + } +} + +void pl111_gem_sync_to_cpu(struct pl111_gem_bo *bo, int pgoff) +{ + struct drm_device *dev = bo->gem_object.dev; + + /* + * TODO MIDEGL-1808 + * + * The following check was meant to detect if the CPU is trying to access + * a buffer that is 
currently mapped for DMA accesses, which is illegal + * as described by the DMA-API. + * + * However, some of our tests are trying to do that, which triggers the message + * below and avoids dma-unmapping the pages not to annoy the DMA device but that + * leads to the test failing because of caches not being properly flushed. + */ + + /* + if (bo->sgt) { + DRM_ERROR("%s: the CPU is trying to access a dma-mapped buffer\n", __func__); + return; + } + */ + + if (!(bo->type & PL111_BOT_DMA) && (bo->type & PL111_BOT_CACHED) && + bo->backing_data.shm.dma_addrs[pgoff]) { + DRM_DEBUG("%s: unmap bo=%p (s=%d), paddr=%08x\n", + __func__, bo, bo->gem_object.size, + bo->backing_data.shm.dma_addrs[pgoff]); + dma_unmap_page(dev->dev, bo->backing_data.shm.dma_addrs[pgoff], + PAGE_SIZE, DMA_BIDIRECTIONAL); + bo->backing_data.shm.dma_addrs[pgoff] = 0; + } +} + +/* Based on omapdrm driver */ +int pl111_bo_mmap(struct drm_gem_object *obj, struct pl111_gem_bo *bo, + struct vm_area_struct *vma, size_t size) +{ + DRM_DEBUG("DRM %s on drm_gem_object=%p, pl111_gem_bo=%p type=%08x\n", + __func__, obj, bo, bo->type); + + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + + if (bo->type & PL111_BOT_WC) { + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + } else if (bo->type & PL111_BOT_CACHED) { + /* + * Objects that do not have a filp (DMA backed) can't be + * mapped as cached now. Write-combine should be enough. + */ + if (WARN_ON(!obj->filp)) + return -EINVAL; + + /* + * As explained in Documentation/dma-buf-sharing.txt + * we need this trick so that we can manually zap ptes + * in order to fake coherency. 
+ */ + fput(vma->vm_file); + get_file(obj->filp); + vma->vm_file = obj->filp; + + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + } else { /* PL111_BOT_UNCACHED */ + vma->vm_page_prot = + pgprot_noncached(vm_get_page_prot(vma->vm_flags)); + } + return 0; +} + +int pl111_gem_mmap(struct file *file_priv, struct vm_area_struct *vma) +{ + int ret; + struct drm_file *priv = file_priv->private_data; + struct drm_device *dev = priv->minor->dev; +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0)) + struct drm_gem_mm *mm = dev->mm_private; + struct drm_hash_item *hash; + struct drm_local_map *map = NULL; +#else + struct drm_vma_offset_node *node; +#endif + struct drm_gem_object *obj; + struct pl111_gem_bo *bo; + + DRM_DEBUG_KMS("DRM %s\n", __func__); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0)) + drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash); + map = drm_hash_entry(hash, struct drm_map_list, hash)->map; + obj = map->handle; +#else + node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + obj = container_of(node, struct drm_gem_object, vma_node); +#endif + bo = PL111_BO_FROM_GEM(obj); + + DRM_DEBUG_KMS("DRM %s on pl111_gem_bo %p bo->type 0x%08x\n", __func__, bo, bo->type); + + /* for an imported buffer we let the exporter handle the mmap */ + if (obj->import_attach) + return dma_buf_mmap(obj->import_attach->dmabuf, vma, 0); + + ret = drm_gem_mmap(file_priv, vma); + if (ret < 0) { + DRM_ERROR("failed to mmap\n"); + return ret; + } + + /* Our page fault handler uses the page offset calculated from the vma, + * so we need to remove the gem cookie offset specified in the call. 
+ */ + vma->vm_pgoff = 0; + + return pl111_bo_mmap(obj, bo, vma, vma->vm_end - vma->vm_start); +} diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_pl111.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_pl111.c new file mode 100755 index 0000000..1d613d0 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_pl111.c @@ -0,0 +1,417 @@ +/* + * + * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * pl111_drm_pl111.c + * PL111 specific functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "pl111_drm.h" + +/* This can't be called from IRQ context, due to clk_get() and board->enable */ +static int clcd_enable(struct drm_framebuffer *fb) +{ + __u32 cntl; + struct clcd_board *board; + + pr_info("DRM %s\n", __func__); + + clk_prepare_enable(priv.clk); + + /* Enable and Power Up */ + cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDPWR | CNTL_LCDVCOMP(1); + DRM_DEBUG_KMS("fb->bits_per_pixel = %d\n", fb->bits_per_pixel); + if (fb->bits_per_pixel == 16) + cntl |= CNTL_LCDBPP16_565; + else if (fb->bits_per_pixel == 32 && fb->depth == 24) + cntl |= CNTL_LCDBPP24; + else + BUG_ON(1); + + cntl |= CNTL_BGR; + + writel(cntl, priv.regs + CLCD_PL111_CNTL); + + if (priv.amba_dev->dev.platform_data) { + board = priv.amba_dev->dev.platform_data; + + if (board->enable) 
+ board->enable(NULL); + } + + /* Enable Interrupts */ + writel(CLCD_IRQ_NEXTBASE_UPDATE, priv.regs + CLCD_PL111_IENB); + + return 0; +} + +int clcd_disable(struct drm_crtc *crtc) +{ + struct clcd_board *board; + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + unsigned long flags; +#endif + + pr_info("DRM %s\n", __func__); + + /* Disable Interrupts */ + writel(0x00000000, priv.regs + CLCD_PL111_IENB); + + if (priv.amba_dev->dev.platform_data) { + board = priv.amba_dev->dev.platform_data; + + if (board->disable) + board->disable(NULL); + } + + /* Disable and Power Down */ + writel(0, priv.regs + CLCD_PL111_CNTL); + + /* Disable clock */ + clk_disable_unprepare(priv.clk); + + pl111_crtc->last_bpp = 0; +#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS + spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); + /* Release the previous buffers */ + if (pl111_crtc->old_kds_res_set != NULL) + kds_resource_set_release(&pl111_crtc->old_kds_res_set); + + pl111_crtc->old_kds_res_set = NULL; + spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); +#endif + return 0; +} + +/* + * To avoid a possible race where "pl111_crtc->current_update_res" has + * been updated (non NULL) but the corresponding scanout buffer has not been + * written to the base registers we must always call this function holding + * the "base_update_lock" spinlock with IRQs disabled (spin_lock_irqsave()). 
+ */ +void do_flip_to_res(struct pl111_drm_flip_resource *flip_res) +{ + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); + struct drm_framebuffer *fb; + struct pl111_gem_bo *bo; + size_t min_size; + fb = flip_res->fb; + bo = PL111_BO_FROM_FRAMEBUFFER(fb); + + + + min_size = (fb->height - 1) * fb->pitches[0] + + fb->width * (fb->bits_per_pixel >> 3); + + BUG_ON(bo->gem_object.size < min_size); + + /* Don't even attempt PL111_BOT_SHM, it's not contiguous */ + BUG_ON(bo->type != PL111_BOT_DMA); + + /* + * Note the buffer for releasing after IRQ, and don't allow any more + * updates until then. + * + * This clcd controller latches the new address on next vsync. Address + * latching is indicated by CLCD_IRQ_NEXTBASE_UPDATE, and so we must + * wait for that before releasing the previous buffer's kds + * resources. Otherwise, we'll allow writers to write to the old buffer + * whilst it is still being displayed + */ + pl111_crtc->current_update_res = flip_res; + + DRM_DEBUG_KMS("Displaying fb 0x%p, dumb_bo 0x%p, physaddr %.8x\n", + fb, bo, bo->backing_data.dma.fb_dev_addr); + + if (drm_vblank_get(pl111_crtc->crtc.dev, pl111_crtc->crtc_index) < 0) + DRM_ERROR("Could not get vblank reference for crtc %d\n", + pl111_crtc->crtc_index); + + /* Set the scanout buffer */ + writel(bo->backing_data.dma.fb_dev_addr, priv.regs + CLCD_UBAS); + writel(bo->backing_data.dma.fb_dev_addr + + ((fb->height - 1) * fb->pitches[0]), priv.regs + CLCD_LBAS); +} + +void +show_framebuffer_on_crtc_cb_internal(struct pl111_drm_flip_resource *flip_res, + struct drm_framebuffer *fb) +{ + unsigned long irq_flags; + struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); + + spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags); + if (list_empty(&pl111_crtc->update_queue) && + !pl111_crtc->current_update_res) { + do_flip_to_res(flip_res); + } else { + /* + * Enqueue the update to occur on a future IRQ + * This only happens on triple-or-greater buffering + */ + 
DRM_DEBUG_KMS("Deferring 3+ buffered flip to fb %p to IRQ\n", + fb); + list_add_tail(&flip_res->link, &pl111_crtc->update_queue); + } + spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags); + + if (!flip_res->page_flip && (pl111_crtc->last_bpp == 0 || + pl111_crtc->last_bpp != fb->bits_per_pixel || + !drm_mode_equal(pl111_crtc->new_mode, + pl111_crtc->current_mode))) { + struct clcd_regs timing; + + pl111_convert_drm_mode_to_timing(pl111_crtc->new_mode, &timing); + + DRM_DEBUG_KMS("Set timing: %08X:%08X:%08X:%08X clk=%ldHz\n", + timing.tim0, timing.tim1, timing.tim2, + timing.tim3, timing.pixclock); + + /* This is the actual mode setting part */ + clk_set_rate(priv.clk, timing.pixclock); + + writel(timing.tim0, priv.regs + CLCD_TIM0); + writel(timing.tim1, priv.regs + CLCD_TIM1); + writel(timing.tim2, priv.regs + CLCD_TIM2); + writel(timing.tim3, priv.regs + CLCD_TIM3); + + clcd_enable(fb); + pl111_crtc->last_bpp = fb->bits_per_pixel; + } + + if (!flip_res->page_flip) { + drm_mode_destroy(flip_res->crtc->dev, pl111_crtc->current_mode); + pl111_crtc->current_mode = pl111_crtc->new_mode; + pl111_crtc->new_mode = NULL; + } + + BUG_ON(!pl111_crtc->current_mode); + + /* + * If IRQs weren't enabled before, they are now. This will eventually + * cause flip_res to be released via pl111_common_irq, which updates + * every time the Base Address is latched (i.e. 
every frame, regardless + * of whether we update the base address or not) + */ +} + +irqreturn_t pl111_irq(int irq, void *data) +{ + u32 irq_stat; + struct pl111_drm_crtc *pl111_crtc = priv.pl111_crtc; + + irq_stat = readl(priv.regs + CLCD_PL111_MIS); + + if (!irq_stat) + return IRQ_NONE; + + if (irq_stat & CLCD_IRQ_NEXTBASE_UPDATE) + pl111_common_irq(pl111_crtc); + + /* Clear the interrupt once done */ + writel(irq_stat, priv.regs + CLCD_PL111_ICR); + + return IRQ_HANDLED; +} + +int pl111_device_init(struct drm_device *dev) +{ + struct pl111_drm_dev_private *priv = dev->dev_private; + int ret; + + if (priv == NULL) { + pr_err("%s no private data\n", __func__); + return -EINVAL; + } + + if (priv->amba_dev == NULL) { + pr_err("%s no amba device found\n", __func__); + return -EINVAL; + } + + /* set up MMIO for register access */ + priv->mmio_start = priv->amba_dev->res.start; + priv->mmio_len = resource_size(&priv->amba_dev->res); + + DRM_DEBUG_KMS("mmio_start=%lu, mmio_len=%u\n", priv->mmio_start, + priv->mmio_len); + + priv->regs = ioremap(priv->mmio_start, priv->mmio_len); + if (priv->regs == NULL) { + pr_err("%s failed mmio\n", __func__); + return -EINVAL; + } + + /* turn off interrupts */ + writel(0, priv->regs + CLCD_PL111_IENB); + + ret = request_irq(priv->amba_dev->irq[0], pl111_irq, 0, + "pl111_irq_handler", NULL); + if (ret != 0) { + pr_err("%s failed %d\n", __func__, ret); + goto out_mmio; + } + + goto finish; + +out_mmio: + iounmap(priv->regs); +finish: + DRM_DEBUG_KMS("pl111_device_init returned %d\n", ret); + return ret; +} + +void pl111_device_fini(struct drm_device *dev) +{ + struct pl111_drm_dev_private *priv = dev->dev_private; + u32 cntl; + + if (priv == NULL || priv->regs == NULL) + return; + + free_irq(priv->amba_dev->irq[0], NULL); + + cntl = readl(priv->regs + CLCD_PL111_CNTL); + + cntl &= ~CNTL_LCDEN; + writel(cntl, priv->regs + CLCD_PL111_CNTL); + + cntl &= ~CNTL_LCDPWR; + writel(cntl, priv->regs + CLCD_PL111_CNTL); + + iounmap(priv->regs); 
+} + +int pl111_amba_probe(struct amba_device *dev, const struct amba_id *id) +{ + struct clcd_board *board = dev->dev.platform_data; + int ret; + pr_info("DRM %s\n", __func__); + + if (!board) + dev_warn(&dev->dev, "board data not available\n"); + + ret = amba_request_regions(dev, NULL); + if (ret != 0) { + DRM_ERROR("CLCD: unable to reserve regs region\n"); + goto out; + } + + priv.amba_dev = dev; + + priv.clk = clk_get(&priv.amba_dev->dev, NULL); + if (IS_ERR(priv.clk)) { + DRM_ERROR("CLCD: unable to get clk.\n"); + ret = PTR_ERR(priv.clk); + goto clk_err; + } + + return 0; + +clk_err: + amba_release_regions(dev); +out: + return ret; +} + +int pl111_amba_remove(struct amba_device *dev) +{ + DRM_DEBUG_KMS("DRM %s\n", __func__); + + clk_put(priv.clk); + + amba_release_regions(dev); + + priv.amba_dev = NULL; + + return 0; +} + +void pl111_convert_drm_mode_to_timing(struct drm_display_mode *mode, + struct clcd_regs *timing) +{ + unsigned int ppl, hsw, hfp, hbp; + unsigned int lpp, vsw, vfp, vbp; + unsigned int cpl; + + memset(timing, 0, sizeof(struct clcd_regs)); + + ppl = (mode->hdisplay / 16) - 1; + hsw = mode->hsync_end - mode->hsync_start - 1; + hfp = mode->hsync_start - mode->hdisplay - 1; + hbp = mode->htotal - mode->hsync_end - 1; + + lpp = mode->vdisplay - 1; + vsw = mode->vsync_end - mode->vsync_start - 1; + vfp = mode->vsync_start - mode->vdisplay; + vbp = mode->vtotal - mode->vsync_end; + + cpl = mode->hdisplay - 1; + + timing->tim0 = (ppl << 2) | (hsw << 8) | (hfp << 16) | (hbp << 24); + timing->tim1 = lpp | (vsw << 10) | (vfp << 16) | (vbp << 24); + timing->tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC | TIM2_BCD | (cpl << 16); + timing->tim3 = 0; + + timing->pixclock = mode->clock * 1000; +} + +void pl111_convert_timing_to_drm_mode(struct clcd_regs *timing, + struct drm_display_mode *mode) +{ + unsigned int ppl, hsw, hfp, hbp; + unsigned int lpp, vsw, vfp, vbp; + + ppl = (timing->tim0 >> 2) & 0x3f; + hsw = (timing->tim0 >> 8) & 0xff; + hfp = (timing->tim0 >> 
16) & 0xff; + hbp = (timing->tim0 >> 24) & 0xff; + + lpp = timing->tim1 & 0x3ff; + vsw = (timing->tim1 >> 10) & 0x3f; + vfp = (timing->tim1 >> 16) & 0xff; + vbp = (timing->tim1 >> 24) & 0xff; + + mode->hdisplay = (ppl + 1) * 16; + mode->hsync_start = ((ppl + 1) * 16) + hfp + 1; + mode->hsync_end = ((ppl + 1) * 16) + hfp + hsw + 2; + mode->htotal = ((ppl + 1) * 16) + hfp + hsw + hbp + 3; + mode->hskew = 0; + + mode->vdisplay = lpp + 1; + mode->vsync_start = lpp + vfp + 1; + mode->vsync_end = lpp + vfp + vsw + 2; + mode->vtotal = lpp + vfp + vsw + vbp + 2; + + mode->flags = 0; + + mode->width_mm = 0; + mode->height_mm = 0; + + mode->clock = timing->pixclock / 1000; + mode->hsync = timing->pixclock / mode->htotal; + mode->vrefresh = mode->hsync / mode->vtotal; +} diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_platform.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_platform.c new file mode 100755 index 0000000..9d5ec0c --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_platform.c @@ -0,0 +1,151 @@ +/* + * + * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_platform.c + * Implementation of the Linux platform device entrypoints for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include "pl111_drm.h" + +static int pl111_platform_drm_suspend(struct platform_device *dev, + pm_message_t state) +{ + pr_info("DRM %s\n", __func__); + return 0; +} + +static int pl111_platform_drm_resume(struct platform_device *dev) +{ + pr_info("DRM %s\n", __func__); + return 0; +} + +int pl111_platform_drm_probe(struct platform_device *dev) +{ + pr_info("DRM %s\n", __func__); + return pl111_drm_init(dev); +} + +static int pl111_platform_drm_remove(struct platform_device *dev) +{ + pr_info("DRM %s\n", __func__); + pl111_drm_exit(dev); + + return 0; +} + +static struct amba_id pl111_id_table[] = { + { + .id = 0x00041110, + .mask = 0x000ffffe, + }, + {0, 0}, +}; + +static struct amba_driver pl111_amba_driver = { + .drv = { + .name = "clcd-pl11x", + }, + .probe = pl111_amba_probe, + .remove = pl111_amba_remove, + .id_table = pl111_id_table, +}; + +static struct platform_driver platform_drm_driver = { + .probe = pl111_platform_drm_probe, + .remove = pl111_platform_drm_remove, + .suspend = pl111_platform_drm_suspend, + .resume = pl111_platform_drm_resume, + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + }, +}; + +static const struct platform_device_info pl111_drm_pdevinfo = { + .name = DRIVER_NAME, + .id = -1, + .dma_mask = ~0UL +}; + +static struct platform_device *pl111_drm_device; + +static int __init pl111_platform_drm_init(void) +{ + int ret; + + pr_info("DRM %s\n", __func__); + + pl111_drm_device = platform_device_register_full(&pl111_drm_pdevinfo); + if (pl111_drm_device == NULL) { + pr_err("DRM platform_device_register_full() failed\n"); + 
return -ENOMEM; + } + + ret = amba_driver_register(&pl111_amba_driver); + if (ret != 0) { + pr_err("DRM amba_driver_register() failed %d\n", ret); + goto err_amba_reg; + } + + ret = platform_driver_register(&platform_drm_driver); + if (ret != 0) { + pr_err("DRM platform_driver_register() failed %d\n", ret); + goto err_pdrv_reg; + } + + return 0; + +err_pdrv_reg: + amba_driver_unregister(&pl111_amba_driver); +err_amba_reg: + platform_device_unregister(pl111_drm_device); + + return ret; +} + +static void __exit pl111_platform_drm_exit(void) +{ + pr_info("DRM %s\n", __func__); + + platform_device_unregister(pl111_drm_device); + amba_driver_unregister(&pl111_amba_driver); + platform_driver_unregister(&platform_drm_driver); +} + +#ifdef MODULE +module_init(pl111_platform_drm_init); +#else +late_initcall(pl111_platform_drm_init); +#endif +module_exit(pl111_platform_drm_exit); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_VERSION(DRIVER_VERSION); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_LICENSE(DRIVER_LICENCE); +MODULE_ALIAS(DRIVER_ALIAS); diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_suspend.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_suspend.c new file mode 100755 index 0000000..d033566 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_suspend.c @@ -0,0 +1,43 @@ +/* + * + * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_suspend.c + * Implementation of the suspend/resume functions for PL111 DRM + */ + +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include "pl111_drm.h" + +int pl111_drm_suspend(struct drm_device *dev, pm_message_t state) +{ + pr_info("DRM %s\n", __func__); + return 0; +} + +int pl111_drm_resume(struct drm_device *dev) +{ + pr_info("DRM %s\n", __func__); + return 0; +} diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_vma.c b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_vma.c new file mode 100755 index 0000000..ff602ef --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/pl111_drm_vma.c @@ -0,0 +1,308 @@ +/* + * + * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * pl111_drm_vma.c + * Implementation of the VM functions for PL111 DRM + */ +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/version.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include <linux/module.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include "pl111_drm.h" + +/* BEGIN drivers/staging/omapdrm/omap_gem_helpers.c */ +/** + * drm_gem_put_pages - helper to free backing pages for a GEM object + * @obj: obj in question + * @pages: pages to free + */ +static void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, + bool dirty, bool accessed) +{ + int i, npages; + struct pl111_gem_bo *bo; + npages = obj->size >> PAGE_SHIFT; + bo = PL111_BO_FROM_GEM(obj); + for (i = 0; i < npages; i++) { + if (dirty) + set_page_dirty(pages[i]); + if (accessed) + mark_page_accessed(pages[i]); + /* Undo the reference we took when populating the table */ + page_cache_release(pages[i]); + } + drm_free_large(pages); +} + +void put_pages(struct drm_gem_object *obj, struct page **pages) +{ + int i, npages; + struct pl111_gem_bo *bo; + npages = obj->size >> PAGE_SHIFT; + bo = PL111_BO_FROM_GEM(obj); + _drm_gem_put_pages(obj, pages, true, true); + if (bo->backing_data.shm.dma_addrs) { + for (i = 0; i < npages; i++) { + /* Filter pages unmapped because of CPU accesses */ + if (!bo->backing_data.shm.dma_addrs[i]) + continue; + if (!dma_mapping_error(obj->dev->dev, + bo->backing_data.shm.dma_addrs[i])) { + dma_unmap_page(obj->dev->dev, + bo->backing_data.shm.dma_addrs[i], + PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + } + kfree(bo->backing_data.shm.dma_addrs); + bo->backing_data.shm.dma_addrs = NULL; + } +} + +/** + * drm_gem_get_pages - helper to allocate backing pages for a GEM object + * @obj: obj in question + * @gfpmask: gfp mask of requested pages + */ +static struct page **_drm_gem_get_pages(struct drm_gem_object *obj, + gfp_t gfpmask) +{ + struct inode *inode; + struct address_space 
*mapping; + struct page *p, **pages; + int i, npages; + + /* This is the shared memory object that backs the GEM resource */ + inode = obj->filp->f_path.dentry->d_inode; + mapping = inode->i_mapping; + + npages = obj->size >> PAGE_SHIFT; + + pages = drm_malloc_ab(npages, sizeof(struct page *)); + if (pages == NULL) + return ERR_PTR(-ENOMEM); + + gfpmask |= mapping_gfp_mask(mapping); + + for (i = 0; i < npages; i++) { + p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); + if (IS_ERR(p)) + goto fail; + pages[i] = p; + + /* + * There is a hypothetical issue w/ drivers that require + * buffer memory in the low 4GB.. if the pages are un- + * pinned, and swapped out, they can end up swapped back + * in above 4GB. If pages are already in memory, then + * shmem_read_mapping_page_gfp will ignore the gfpmask, + * even if the already in-memory page disobeys the mask. + * + * It is only a theoretical issue today, because none of + * the devices with this limitation can be populated with + * enough memory to trigger the issue. But this BUG_ON() + * is here as a reminder in case the problem with + * shmem_read_mapping_page_gfp() isn't solved by the time + * it does become a real issue. 
+ * + * See this thread: http://lkml.org/lkml/2011/7/11/238 + */ + BUG_ON((gfpmask & __GFP_DMA32) && + (page_to_pfn(p) >= 0x00100000UL)); + } + + return pages; + +fail: + while (i--) + page_cache_release(pages[i]); + + drm_free_large(pages); + return ERR_PTR(PTR_ERR(p)); +} + +struct page **get_pages(struct drm_gem_object *obj) +{ + struct pl111_gem_bo *bo; + bo = PL111_BO_FROM_GEM(obj); + + if (bo->backing_data.shm.pages == NULL) { + struct page **p; + int npages = obj->size >> PAGE_SHIFT; + int i; + + p = _drm_gem_get_pages(obj, GFP_KERNEL); + if (IS_ERR(p)) + return ERR_PTR(-ENOMEM); + + bo->backing_data.shm.pages = p; + + if (bo->backing_data.shm.dma_addrs == NULL) { + bo->backing_data.shm.dma_addrs = + kzalloc(npages * sizeof(dma_addr_t), + GFP_KERNEL); + if (bo->backing_data.shm.dma_addrs == NULL) + goto error_out; + } + + if (!(bo->type & PL111_BOT_CACHED)) { + for (i = 0; i < npages; ++i) { + bo->backing_data.shm.dma_addrs[i] = + dma_map_page(obj->dev->dev, p[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(obj->dev->dev, + bo->backing_data.shm.dma_addrs[i])) + goto error_out; + } + } + } + + return bo->backing_data.shm.pages; + +error_out: + put_pages(obj, bo->backing_data.shm.pages); + bo->backing_data.shm.pages = NULL; + return ERR_PTR(-ENOMEM); +} + +/* END drivers/staging/omapdrm/omap_gem_helpers.c */ +int pl111_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page **pages; + unsigned long pfn; + struct drm_gem_object *obj = vma->vm_private_data; + struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); + struct drm_device *dev = obj->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + + /* + * Our mmap calls setup a valid vma->vm_pgoff + * so we can use vmf->pgoff + */ + + if (bo->type & PL111_BOT_DMA) { + pfn = (bo->backing_data.dma.fb_dev_addr >> PAGE_SHIFT) + + vmf->pgoff; + } else { /* PL111_BOT_SHM */ + pages = get_pages(obj); + if (IS_ERR(pages)) { + dev_err(obj->dev->dev, + "could not get pages: %ld\n", 
PTR_ERR(pages)); + ret = PTR_ERR(pages); + goto error; + } + pfn = page_to_pfn(pages[vmf->pgoff]); + pl111_gem_sync_to_cpu(bo, vmf->pgoff); + } + + DRM_DEBUG("bo=%p physaddr=0x%.8x for offset 0x%x\n", + bo, PFN_PHYS(pfn), PFN_PHYS(vmf->pgoff)); + + ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); + +error: + mutex_unlock(&dev->struct_mutex); + + switch (ret) { + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + +/* + * The core drm_vm_ functions in kernel 3.4 are not ready + * to handle dma_buf cases where vma->vm_file->private_data + * cannot be accessed to get the device. + * + * We use these functions from 3.5 instead where the device + * pointer is passed explicitly. + * + * However they aren't exported from the kernel until 3.10 + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +void pl111_drm_vm_open_locked(struct drm_device *dev, + struct vm_area_struct *vma) +{ + struct drm_vma_entry *vma_entry; + + DRM_DEBUG("0x%08lx,0x%08lx\n", + vma->vm_start, vma->vm_end - vma->vm_start); + atomic_inc(&dev->vma_count); + + vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); + if (vma_entry) { + vma_entry->vma = vma; + vma_entry->pid = current->pid; + list_add(&vma_entry->head, &dev->vmalist); + } +} + +void pl111_drm_vm_close_locked(struct drm_device *dev, + struct vm_area_struct *vma) +{ + struct drm_vma_entry *pt, *temp; + + DRM_DEBUG("0x%08lx,0x%08lx\n", + vma->vm_start, vma->vm_end - vma->vm_start); + atomic_dec(&dev->vma_count); + + list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { + if (pt->vma == vma) { + list_del(&pt->head); + kfree(pt); + break; + } + } +} + +void pl111_gem_vm_open(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + + drm_gem_object_reference(obj); + + mutex_lock(&obj->dev->struct_mutex); + pl111_drm_vm_open_locked(obj->dev, vma); + 
mutex_unlock(&obj->dev->struct_mutex); +} + +void pl111_gem_vm_close(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; + + mutex_lock(&dev->struct_mutex); + pl111_drm_vm_close_locked(obj->dev, vma); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); +} +#endif diff --git a/driver/product/kernel/drivers/gpu/drm/pl111/sconscript b/driver/product/kernel/drivers/gpu/drm/pl111/sconscript new file mode 100755 index 0000000..5c47de7 --- /dev/null +++ b/driver/product/kernel/drivers/gpu/drm/pl111/sconscript @@ -0,0 +1,52 @@ +# +# (C) COPYRIGHT 2010-2013, 2015-2016 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +import os +import shutil +Import('env') + +# Generate a build environment for the integration tests, taking a copy of the top-level build environment +# (env) as a start. 
+drm_env = env.Clone()
+
+# Xorg uses C++ style comments and 'inline' keyword
+if '-std=c89' in drm_env['CFLAGS']:
+    drm_env['CFLAGS'].remove('-std=c89')
+
+# X11 generates a lot of warnings
+if '-Werror' in drm_env['CCFLAGS']:
+    drm_env['CCFLAGS'].remove('-Werror')
+
+# remove the 'lib' prefix
+drm_env['LIBPREFIX'] = ''
+
+src = Glob('*.c')
+
+if drm_env.GetOption('clean') :
+    drm_env.Execute(Action("make clean", 'clean [pl111]'))
+    cmd = drm_env.Command('$STATIC_LIB_PATH/mali_drm.ko', src, [])
+else:
+    # The target is mali_drm.ko, built from the source in pl111_drm/pl111, via the action makeAction
+    # mali_drm.ko will be copied to $STATIC_LIB_PATH after being built by the standard Linux
+    # kernel build system, after which it can be installed to the directory specified if
+    # "libs_install" is set; this is done by LibTarget.
+    makeAction=Action("cd ${SOURCE.dir} && make MALI_DEBUG=${debug} && cp pl111_drm.ko $STATIC_LIB_PATH/mali_drm.ko", '$MAKECOMSTR')
+    cmd = drm_env.Command('$STATIC_LIB_PATH/mali_drm.ko', src, [makeAction])
+
+# need Module.symvers from drm.ko
+#drm_env.Depends('$STATIC_LIB_PATH/pl111_drm.ko', '$STATIC_LIB_PATH/drm.ko')
+
+drm_env.KernelObjTarget('x11', cmd)