Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 26
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 26
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 18
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 9
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 27
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 251
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 54
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 35
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 25
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig | 25
-rw-r--r--  drivers/gpu/drm/omapdrm/Makefile | 24
-rw-r--r--  drivers/gpu/drm/omapdrm/TODO | 23
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 298
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 657
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_debugfs.c | 125
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_priv.h | 188
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 991
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.h | 141
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 610
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 333
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 170
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 472
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 399
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 1511
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 225
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_helpers.c | 169
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 322
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 450
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm-sita.c | 703
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm-sita.h | 95
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm.h | 328
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 26
51 files changed, 8582 insertions(+), 299 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index ed9e3af17b3..0ce5f52ac56 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -215,3 +215,5 @@ source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/omapdrm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6f58c81cfcb..b6b43cbc18e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,4 +50,5 @@ obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 3e6584b940d..34931fe7d2c 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -40,6 +40,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include "ast_drv.h"
static void ast_dirty_update(struct ast_fbdev *afbdev,
@@ -145,9 +146,10 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
return ret;
}
-static int astfb_create(struct ast_fbdev *afbdev,
+static int astfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
struct drm_device *dev = afbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_framebuffer *fb;
@@ -248,26 +250,10 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = ast_crtc->lut_b[regno] << 8;
}
-static int ast_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = astfb_create(afbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
.gamma_set = ast_fb_gamma_set,
.gamma_get = ast_fb_gamma_get,
- .fb_probe = ast_find_or_create_single,
+ .fb_probe = astfb_create,
};
static void ast_fbdev_destroy(struct drm_device *dev,
@@ -314,6 +300,10 @@ int ast_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&afbdev->helper, 32);
return 0;
}
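The same conversion repeats in the fbdev code of every driver touched below: ->fb_probe now receives the struct drm_fb_helper pointer directly and only has to create the framebuffer, because the helper core invokes it solely when no fb exists yet, which is why the find_or_create_single wrappers can be deleted. A minimal sketch of the new callback shape, with foo_* names standing in for a hypothetical driver:

static int foofb_create(struct drm_fb_helper *helper,
			struct drm_fb_helper_surface_size *sizes)
{
	/* the fb helper is embedded first in the driver's fbdev struct */
	struct foo_fbdev *ffbdev = (struct foo_fbdev *)helper;

	/*
	 * Allocate the backing storage and fill in helper->fb and
	 * helper->fbdev; return 0 on success, a negative error otherwise.
	 */
	return foofb_create_framebuffer(ffbdev, sizes);
}

static struct drm_fb_helper_funcs foo_fb_helper_funcs = {
	.fb_probe = foofb_create,	/* only called while helper->fb is NULL */
};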
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 3daea0f638c..e25afccaf85 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
return ret;
}
-static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
struct drm_device *dev = gfbdev->helper.dev;
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;
@@ -219,23 +221,6 @@ out_iounmap:
return ret;
}
-static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = cirrusfb_create(gfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
@@ -267,7 +252,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
.gamma_set = cirrus_crtc_fb_gamma_set,
.gamma_get = cirrus_crtc_fb_gamma_get,
- .fb_probe = cirrus_fb_find_or_create_single,
+ .fb_probe = cirrusfb_create,
};
int cirrus_fbdev_init(struct cirrus_device *cdev)
@@ -291,6 +276,9 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(cdev->dev);
drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 826a5ca3595..781aef524b3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -68,9 +68,23 @@ void drm_modeset_unlock_all(struct drm_device *dev)
mutex_unlock(&dev->mode_config.mutex);
}
-
EXPORT_SYMBOL(drm_modeset_unlock_all);
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
+
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
@@ -1982,9 +1996,9 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
plane_req->src_w, plane_req->src_h);
if (!ret) {
old_fb = plane->fb;
- fb = NULL;
plane->crtc = crtc;
plane->fb = fb;
+ fb = NULL;
}
drm_modeset_unlock_all(dev);
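The new drm_warn_on_modeset_not_all_locked() is a cheap sanity check for functions that must only run with every modeset lock held, which is how drm_fb_helper_restore_fbdev_mode() uses it further down in this patch. A hedged usage sketch; my_reset_outputs() is illustrative and not part of the patch:

static void my_reset_outputs(struct drm_device *dev)
{
	/* caller must hold dev->mode_config.mutex and every crtc->mutex */
	drm_warn_on_modeset_not_all_locked(dev);

	/* ... walk dev->mode_config.crtc_list and reprogram each crtc ... */
}

/* typical caller */
drm_modeset_lock_all(dev);
my_reset_outputs(dev);
drm_modeset_unlock_all(dev);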
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 51324256a65..67aa0dd6825 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -354,10 +354,14 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
break;
}
}
- if (i == 4)
+
+ if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
+
+ connector->bad_edid_counter++;
+ }
}
if (valid_extensions != block[0x7e]) {
@@ -2050,7 +2054,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
- num_modes += add_inferred_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 3742bc96421..1c8549dae99 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -275,23 +275,8 @@ err_drm_gem_cma_free_object:
return ret;
}
-static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- if (!helper->fb) {
- ret = drm_fbdev_cma_create(helper, sizes);
- if (ret < 0)
- return ret;
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
- .fb_probe = drm_fbdev_cma_probe,
+ .fb_probe = drm_fbdev_cma_create,
};
/**
@@ -333,6 +318,9 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, preferred_bpp);
if (ret < 0) {
dev_err(dev->dev, "Failed to set inital hw configuration.\n");
@@ -389,8 +377,13 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
*/
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
- if (fbdev_cma)
+ if (fbdev_cma) {
+ struct drm_device *dev = fbdev_cma->fb_helper.dev;
+
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+ drm_modeset_unlock_all(dev);
+ }
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
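Since drm_fbdev_cma_restore_mode() now takes and drops the modeset locks itself, a CMA-based driver's ->lastclose can stay a one-liner. A sketch under the assumption of a driver-private struct holding the drm_fbdev_cma pointer (the foo_* names are illustrative):

static void foo_lastclose(struct drm_device *dev)
{
	struct foo_drm_private *priv = dev->dev_private;

	/* the modeset locks are handled inside the helper */
	drm_fbdev_cma_restore_mode(priv->fbdev_cma);
}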
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0c6e25e979d..59d6b9bf204 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -52,9 +52,36 @@ static LIST_HEAD(kernel_fb_helper_list);
* mode setting driver. They can be used mostly independantely from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code of updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
*/
-/* simple single crtc case helper function */
+/**
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
@@ -163,6 +190,10 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
+/**
+ * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -208,6 +239,10 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
return NULL;
}
+/**
+ * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -239,10 +274,21 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+/**
+ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from driver's drm ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ */
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
bool error = false;
int i, ret;
+
+ drm_warn_on_modeset_not_all_locked(fb_helper->dev);
+
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
ret = drm_mode_set_config_internal(mode_set);
@@ -253,6 +299,10 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
+/*
+ * restore fbcon display for all kms drivers using this helper, used for sysrq
+ * and panic handling.
+ */
static bool drm_fb_helper_force_kernel_mode(void)
{
bool ret, error = false;
@@ -272,7 +322,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
return error;
}
-int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
+static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
/*
@@ -285,26 +335,11 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
-EXPORT_SYMBOL(drm_fb_helper_panic);
static struct notifier_block paniced = {
.notifier_call = drm_fb_helper_panic,
};
-/**
- * drm_fb_helper_restore - restore the framebuffer console (kernel) config
- *
- * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
- */
-void drm_fb_helper_restore(void)
-{
- bool ret;
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
-}
-EXPORT_SYMBOL(drm_fb_helper_restore);
-
static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
@@ -326,7 +361,10 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
- drm_fb_helper_restore();
+ bool ret;
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
@@ -353,6 +391,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
int i, j;
/*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress)
+ return;
+
+ /*
* For each CRTC in this fb, turn the connectors on/off.
*/
drm_modeset_lock_all(dev);
@@ -378,6 +424,11 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
drm_modeset_unlock_all(dev);
}
+/**
+ * drm_fb_helper_blank - implementation for ->fb_blank
+ * @blank: desired blanking state
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
@@ -421,6 +472,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->crtc_info);
}
+/**
+ * drm_fb_helper_init - initialize a drm_fb_helper structure
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+ * @crtc_count: maximum number of crtcs to support in this fbdev emulation
+ * @max_conn_count: max connector count
+ *
+ * This allocates the structures for the fbdev helper with the given limits.
+ * Note that this won't yet touch the hardware (through the driver interfaces)
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writers more control over the exact init sequence.
+ *
+ * Drivers must set fb_helper->funcs before calling
+ * drm_fb_helper_initial_config().
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
@@ -549,6 +618,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
return 0;
}
+/**
+ * drm_fb_helper_setcmap - implementation for ->fb_setcmap
+ * @cmap: cmap to set
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -588,6 +662,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
+/**
+ * drm_fb_helper_check_var - implementation for ->fb_check_var
+ * @var: screeninfo to check
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -680,13 +759,19 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
-/* this will let fbcon do the mode init */
+/**
+ * drm_fb_helper_set_par - implementation for ->fb_set_par
+ * @info: fbdev registered by the helper
+ *
+ * This will let fbcon do the mode init and is called at initialization time by
+ * the fbdev core when registering the driver, and later on through the hotplug
+ * callback.
+ */
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
- struct drm_crtc *crtc;
int ret;
int i;
@@ -697,7 +782,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
if (ret) {
drm_modeset_unlock_all(dev);
@@ -714,6 +798,11 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
+/**
+ * drm_fb_helper_pan_display - implementation for ->fb_pan_display
+ * @var: updated screen information
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -751,10 +840,15 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback and then registers the fbdev and sets up the panic
+ * notifier.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- int new_fb = 0;
+ int ret = 0;
int crtc_count = 0;
int i;
struct fb_info *info;
@@ -832,27 +926,30 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
/* push down into drivers */
- new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
- if (new_fb < 0)
- return new_fb;
+ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (ret < 0)
+ return ret;
info = fb_helper->fbdev;
- /* set the fb pointer */
+ /*
+ * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+ * events, but at init time drm_setup_crtcs needs to be called before
+ * the fb is allocated (since we need to figure out the desired size of
+ * the fb before we can allocate it ...). Hence we need to fix things up
+ * here again.
+ */
for (i = 0; i < fb_helper->crtc_count; i++)
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- if (new_fb) {
- info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
- dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ info->var.pixclock = 0;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
- } else {
- drm_fb_helper_set_par(info);
- }
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
@@ -862,13 +959,25 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- if (new_fb)
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
+/**
+ * drm_fb_helper_fill_fix - initializes fixed fbdev information
+ * @info: fbdev registered by the helper
+ * @pitch: desired pitch
+ * @depth: desired depth
+ *
+ * Helper to fill in the fixed fbdev information useful for non-accelerated
+ * fbdev emulations. Drivers which support acceleration methods which impose
+ * additional constraints need to set up their own limits.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback.
+ */
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -889,6 +998,20 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+/**
+ * drm_fb_helper_fill_var - initializes variable fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @fb_width: desired fb width
+ * @fb_height: desired fb height
+ *
+ * Sets up the variable fbdev metainformation from the given fb helper instance
+ * and the drm framebuffer allocated in fb_helper->fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback after having allocated the fbdev backing
+ * storage framebuffer.
+ */
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
@@ -1312,6 +1435,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->num_connectors = 0;
+ modeset->fb = NULL;
}
for (i = 0; i < fb_helper->connector_count; i++) {
@@ -1328,9 +1452,21 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
}
}
+ /* Clear out any old modes if there are no more connected outputs. */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+ BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+ }
+ }
out:
kfree(crtcs);
kfree(modes);
@@ -1338,18 +1474,23 @@ out:
}
/**
- * drm_helper_initial_config - setup a sane initial connector configuration
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
* @bpp_sel: bpp value to use for the framebuffer configuration
*
- * LOCKING:
- * Called at init time by the driver to set up the @fb_helper initial
- * configuration, must take the mode config lock.
- *
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
+ * values for the fbdev info structure.
+ *
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
@@ -1358,9 +1499,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
struct drm_device *dev = fb_helper->dev;
int count = 0;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(fb_helper->dev);
-
drm_fb_helper_parse_command_line(fb_helper);
count = drm_fb_helper_probe_connector_modes(fb_helper,
@@ -1383,12 +1521,17 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
- * LOCKING:
- * Called at runtime, must take mode config lock.
- *
* Scan the connectors attached to the fb_helper and try to put together a
* setup after *notification of a change in output configuration.
*
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * Note that the driver must ensure that this is only called _after_ the fb has
+ * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ *
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
@@ -1418,7 +1561,9 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
drm_setup_crtcs(fb_helper);
drm_modeset_unlock_all(dev);
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ drm_fb_helper_set_par(fb_helper->fbdev);
+
+ return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
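Taken together, the init sequence documented above and followed by the converted drivers in this patch looks roughly as below; the foo_* names, the bpp value and the error handling are illustrative only:

static int foo_fbdev_init(struct drm_device *dev)
{
	struct foo_fbdev *ffbdev;
	int ret;

	ffbdev = kzalloc(sizeof(*ffbdev), GFP_KERNEL);
	if (!ffbdev)
		return -ENOMEM;

	ffbdev->helper.funcs = &foo_fb_helper_funcs;

	ret = drm_fb_helper_init(dev, &ffbdev->helper,
				 FOO_NUM_CRTCS, FOO_MAX_CONNECTORS);
	if (ret) {
		kfree(ffbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&ffbdev->helper);

	/* disabling unused outputs/crtcs is now the driver's responsibility */
	drm_helper_disable_unused_functions(dev);

	drm_fb_helper_initial_config(&ffbdev->helper, 32);
	return 0;
}

At runtime the driver restores the console from its ->lastclose by calling drm_fb_helper_restore_fbdev_mode() under drm_modeset_lock_all()/drm_modeset_unlock_all(), and forwards output changes to drm_fb_helper_hotplug_event() from its ->output_poll_changed callback.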
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 86102a08f65..bd719e936e1 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -439,33 +439,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return 0;
}
-#else
-
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- return -1;
-}
-
-#endif
-
-EXPORT_SYMBOL(drm_pci_init);
-
-/*@}*/
-void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(pdriver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_pci_exit);
-
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
@@ -503,3 +476,30 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
+
+#else
+
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ return -1;
+}
+
+#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
+/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 90d335cfb8c..68f0045f86b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -226,36 +226,8 @@ out:
return ret;
}
-static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * with !helper->fb, it means that this funcion is called first time
- * and after that, the helper->fb would be used as clone mode.
- */
- if (!helper->fb) {
- ret = exynos_drm_fbdev_create(helper, sizes);
- if (ret < 0) {
- DRM_ERROR("failed to create fbdev.\n");
- return ret;
- }
-
- /*
- * fb_helper expects a value more than 1 if succeed
- * because register_framebuffer() should be called.
- */
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
- .fb_probe = exynos_drm_fbdev_probe,
+ .fb_probe = exynos_drm_fbdev_create,
};
int exynos_drm_fbdev_init(struct drm_device *dev)
@@ -295,6 +267,9 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_ERROR("failed to set up hw configuration.\n");
@@ -376,5 +351,7 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
if (!private || !private->fb_helper)
return;
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+ drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index c1ef37e2efd..2590cac8425 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -545,9 +545,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- int new_fb = 0;
int bytespp;
- int ret;
bytespp = sizes->surface_bpp / 8;
if (bytespp == 3) /* no 24bit packed */
@@ -562,13 +560,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
sizes->surface_depth = 16;
}
- if (!helper->fb) {
- ret = psbfb_create(psb_fbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
+ return psbfb_create(psb_fbdev, sizes);
}
static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
@@ -616,6 +608,10 @@ int psb_fbdev_init(struct drm_device *dev)
INTELFB_CONN_LIMIT);
drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index cfc96878d74..969d08c72d1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -685,7 +685,6 @@ static void intel_crt_reset(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.mode_set = intel_crt_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 816c45c71b7..d64af5aa4a1 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1489,7 +1489,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_fixup = intel_ddi_mode_fixup,
.mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
};
void intel_ddi_init(struct drm_device *dev, enum port port)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6eb3882ba9b..6337196b793 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3727,10 +3727,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, enable);
}
-static void intel_crtc_noop(struct drm_crtc *crtc)
-{
-}
-
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3779,10 +3775,6 @@ void intel_modeset_disable(struct drm_device *dev)
}
}
-void intel_encoder_noop(struct drm_encoder *encoder)
-{
-}
-
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -7356,7 +7348,6 @@ free_work:
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_noop,
};
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
@@ -8066,14 +8057,9 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
- if (!set->mode)
- set->fb = NULL;
-
- /* The fb helper likes to play gross jokes with ->mode_set_config.
- * Unfortunately the crtc helper doesn't do much at all for this case,
- * so we have to cope with this madness until the fb helper is fixed up. */
- if (set->fb && set->num_connectors == 0)
- return 0;
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 7b8bfe8982e..31c0205685a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2561,7 +2561,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.mode_fixup = intel_dp_mode_fixup,
.mode_set = intel_dp_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 005a91f1f8f..d282052aadd 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -525,7 +525,6 @@ extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 15da99533e5..00e70dbe82d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -345,7 +345,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 1c510da04d1..981bdce3634 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -57,9 +57,10 @@ static struct fb_ops intelfb_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static int intelfb_create(struct intel_fbdev *ifbdev,
+static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
@@ -181,26 +182,10 @@ out:
return ret;
}
-static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = intelfb_create(ifbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
- .fb_probe = intel_fb_find_or_create_single,
+ .fb_probe = intelfb_create,
};
static void intel_fbdev_destroy(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5a6138c62fe..3ea0c8b6a00 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -972,7 +972,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c7154bfa54c..3d1d97488cc 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -663,7 +663,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.mode_fixup = intel_lvds_mode_fixup,
.mode_set = intel_lvds_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index f01063a2323..33b46d9694e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2043,7 +2043,6 @@ done:
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.mode_fixup = intel_sdvo_mode_fixup,
.mode_set = intel_sdvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 984a113c5d1..d808421c1c8 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1487,7 +1487,6 @@ out:
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
.mode_fixup = intel_tv_mode_fixup,
.mode_set = intel_tv_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 5c69b432f99..d2253f63948 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
return ret;
}
-static int mgag200fb_create(struct mga_fbdev *mfbdev,
+static int mgag200fb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
struct drm_device *dev = mfbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct mga_device *mdev = dev->dev_private;
@@ -209,23 +211,6 @@ out:
return ret;
}
-static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = mgag200fb_create(mfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int mga_fbdev_destroy(struct drm_device *dev,
struct mga_fbdev *mfbdev)
{
@@ -256,7 +241,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
.gamma_set = mga_crtc_fb_gamma_set,
.gamma_get = mga_crtc_fb_gamma_get,
- .fb_probe = mga_fb_find_or_create_single,
+ .fb_probe = mgag200fb_create,
};
int mgag200_fbdev_init(struct mga_device *mdev)
@@ -278,6 +263,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(mdev->dev);
+
drm_fb_helper_initial_config(&mfbdev->helper, 32);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 64d6e3047de..2f2741483b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include <core/engine.h>
+#include <linux/swiotlb.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index d4ecb4deb48..b0353178158 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -251,9 +251,10 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
}
static int
-nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
+nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
struct drm_device *dev = fbcon->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
@@ -388,23 +389,6 @@ out:
return ret;
}
-static int
-nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = nouveau_fbcon_create(fbcon, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
@@ -450,7 +434,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get,
- .fb_probe = nouveau_fbcon_find_or_create_single,
+ .fb_probe = nouveau_fbcon_create,
};
@@ -491,6 +475,9 @@ nouveau_fbcon_init(struct drm_device *dev)
else
preferred_bpp = 32;
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
new file mode 100644
index 00000000000..b724a413143
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -0,0 +1,25 @@
+
+config DRM_OMAP
+ tristate "OMAP DRM"
+ depends on DRM && !CONFIG_FB_OMAP2
+ depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+ select DRM_KMS_HELPER
+ select OMAP2_DSS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default n
+ help
+ DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+ int "Number of CRTCs"
+ range 1 10
+ default 1 if ARCH_OMAP2 || ARCH_OMAP3
+ default 2 if ARCH_OMAP4
+ depends on DRM_OMAP
+ help
+ Select the number of video overlays which can be used as framebuffers.
+ The remaining overlays are reserved for video.
+
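For reference, enabling the new driver as a module amounts to a .config fragment along these lines; building as a module and the CRTC count are just example choices:

CONFIG_DRM=y
CONFIG_DRM_OMAP=m
CONFIG_DRM_OMAP_NUM_CRTCS=2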
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
new file mode 100644
index 00000000000..d85e058f284
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI)
+#
+
+ccflags-y := -Iinclude/drm -Werror
+omapdrm-y := omap_drv.o \
+ omap_irq.o \
+ omap_debugfs.o \
+ omap_crtc.o \
+ omap_plane.o \
+ omap_encoder.o \
+ omap_connector.o \
+ omap_fb.o \
+ omap_fbdev.o \
+ omap_gem.o \
+ omap_gem_dmabuf.o \
+ omap_dmm_tiler.o \
+ tcm-sita.o
+
+# temporary:
+omapdrm-y += omap_gem_helpers.o
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/TODO b/drivers/gpu/drm/omapdrm/TODO
new file mode 100644
index 00000000000..4d8c18aa5dd
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/TODO
@@ -0,0 +1,23 @@
+TODO
+. Where should we do eviction (detatch_pages())? We aren't necessarily
+ accessing the pages via a GART, so maybe we need some other threshold
+ to put a cap on the # of pages that can be pin'd.
+ . Use mm_shrinker to trigger unpinning pages.
+ . This is mainly theoretical since most of these devices don't actually
+ have swap or harddrive.
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+ etc..), which isn't really ideal.
+. Revisit GEM sync object infrastructure.. TTM has some framework for this
+ already. Possibly this could be refactored out and made more common?
+ There should be some way to do this with less wheel-reinvention.
+ . This can be handled by the dma-buf fence/reservation stuff when it
+ lands
+
+Userspace:
+. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
+. OMAP5432 uEVM
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
new file mode 100644
index 00000000000..44284fd981f
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -0,0 +1,298 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+ struct drm_connector base;
+ struct omap_dss_device *dssdev;
+ struct drm_encoder *encoder;
+};
+
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings)
+{
+ mode->clock = timings->pixel_clock;
+
+ mode->hdisplay = timings->x_res;
+ mode->hsync_start = mode->hdisplay + timings->hfp;
+ mode->hsync_end = mode->hsync_start + timings->hsw;
+ mode->htotal = mode->hsync_end + timings->hbp;
+
+ mode->vdisplay = timings->y_res;
+ mode->vsync_start = mode->vdisplay + timings->vfp;
+ mode->vsync_end = mode->vsync_start + timings->vsw;
+ mode->vtotal = mode->vsync_end + timings->vbp;
+
+ mode->flags = 0;
+
+ if (timings->interlace)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+}
+
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode)
+{
+ timings->pixel_clock = mode->clock;
+
+ timings->x_res = mode->hdisplay;
+ timings->hfp = mode->hsync_start - mode->hdisplay;
+ timings->hsw = mode->hsync_end - mode->hsync_start;
+ timings->hbp = mode->htotal - mode->hsync_end;
+
+ timings->y_res = mode->vdisplay;
+ timings->vfp = mode->vsync_start - mode->vdisplay;
+ timings->vsw = mode->vsync_end - mode->vsync_start;
+ timings->vbp = mode->vtotal - mode->vsync_end;
+
+ timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ else
+ timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ else
+ timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+ timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+}
+
+static enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ enum drm_connector_status ret;
+
+ if (dssdrv->detect) {
+ if (dssdrv->detect(dssdev))
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+ } else {
+ ret = connector_status_unknown;
+ }
+
+ VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+ return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+ DBG("%s", omap_connector->dssdev->name);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(omap_connector);
+
+ omap_dss_put_device(dssdev);
+}
+
+#define MAX_EDID 512
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct drm_device *dev = connector->dev;
+ int n = 0;
+
+ DBG("%s", omap_connector->dssdev->name);
+
+ /* if display exposes EDID, then we parse that in the normal way to
+ * build table of supported modes.. otherwise (ie. fixed resolution
+ * LCD panels) we just return a single mode corresponding to the
+ * currently configured timings:
+ */
+ if (dssdrv->read_edid) {
+ void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+
+ if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+ drm_edid_is_valid(edid)) {
+ drm_mode_connector_update_edid_property(
+ connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ } else {
+ drm_mode_connector_update_edid_property(
+ connector, NULL);
+ }
+ kfree(edid);
+ } else {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct omap_video_timings timings = {0};
+
+ dssdrv->get_timings(dssdev, &timings);
+
+ copy_timings_omap_to_drm(mode, &timings);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ n = 1;
+ }
+
+ return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings = {0};
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *new_mode;
+ int ret = MODE_BAD;
+
+ copy_timings_drm_to_omap(&timings, mode);
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ if (!dssdrv->check_timings(dssdev, &timings)) {
+ /* check if vrefresh is still valid */
+ new_mode = drm_mode_duplicate(dev, mode);
+ new_mode->clock = timings.pixel_clock;
+ new_mode->vrefresh = 0;
+ if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+ ret = MODE_OK;
+ drm_mode_destroy(dev, new_mode);
+ }
+
+ DBG("connector: mode %s: "
+ "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ (ret == MODE_OK) ? "valid" : "invalid",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ return ret;
+}
+
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ return omap_connector->encoder;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = omap_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+ .get_modes = omap_connector_get_modes,
+ .mode_valid = omap_connector_mode_valid,
+ .best_encoder = omap_connector_attached_encoder,
+};
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ /* TODO: enable when supported in dss */
+ VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = NULL;
+ struct omap_connector *omap_connector;
+
+ DBG("%s", dssdev->name);
+
+ omap_dss_get_device(dssdev);
+
+ omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+ if (!omap_connector) {
+ dev_err(dev->dev, "could not allocate connector\n");
+ goto fail;
+ }
+
+ omap_connector->dssdev = dssdev;
+ omap_connector->encoder = encoder;
+
+ connector = &omap_connector->base;
+
+ drm_connector_init(dev, connector, &omap_connector_funcs,
+ connector_type);
+ drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+#if 0 /* enable when dss2 supports hotplug */
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+ connector->polled = 0;
+ else
+#endif
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ if (connector)
+ omap_connector_destroy(connector);
+
+ return NULL;
+}
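For context, the encoder setup code elsewhere in the driver is expected to pair each DSS device with a connector created by omap_connector_init(); a hedged sketch, where the HDMI connector type and the foo_* wrapper are placeholders:

static int foo_attach_connector(struct drm_device *dev,
		struct omap_dss_device *dssdev, struct drm_encoder *encoder)
{
	struct drm_connector *connector;

	connector = omap_connector_init(dev, DRM_MODE_CONNECTOR_HDMIA,
			dssdev, encoder);
	if (!connector)
		return -ENOMEM;

	/* ->best_encoder will hand this encoder back for the connector */
	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}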
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
new file mode 100644
index 00000000000..ac2258f5980
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -0,0 +1,657 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+ struct drm_crtc base;
+ struct drm_plane *plane;
+
+ const char *name;
+ int pipe;
+ enum omap_channel channel;
+ struct omap_overlay_manager_info info;
+
+ /*
+ * Temporary: eventually this will go away, but it is needed
+ * for now to keep the outputs happy. (They only need
+ * mgr->id.) Eventually this will be replaced w/ something
+ * more common-panel-framework-y
+ */
+ struct omap_overlay_manager mgr;
+
+ struct omap_video_timings timings;
+ bool enabled;
+ bool full_update;
+
+ struct omap_drm_apply apply;
+
+ struct omap_drm_irq apply_irq;
+ struct omap_drm_irq error_irq;
+
+ /* list of in-progress apply's: */
+ struct list_head pending_applies;
+
+ /* list of queued apply's: */
+ struct list_head queued_applies;
+
+ /* for handling queued and in-progress applies: */
+ struct work_struct apply_work;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct drm_framebuffer *old_fb;
+
+ /* for handling page flips without caring about what context
+ * the callback is called from. Possibly we should just
+ * make omap_gem always call the cb from the worker so
+ * we don't have to care about this..
+ *
+ * XXX maybe fold into apply_work??
+ */
+ struct work_struct page_flip_work;
+};
+
+/*
+ * Manager-ops, callbacks from output when they need to configure
+ * the upstream part of the video pipe.
+ *
+ * Most of these we can ignore until we add support for command-mode
+ * panels.. for video-mode the crtc-helpers already do an adequate
+ * job of sequencing the setup of the video pipe in the proper order
+ */
+
+/* we can probably ignore these until we support command-mode panels: */
+static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+{
+}
+
+static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+{
+ return 0;
+}
+
+static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+{
+}
+
+static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings)
+{
+ struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ DBG("%s", omap_crtc->name);
+ omap_crtc->timings = *timings;
+ omap_crtc->full_update = true;
+}
+
+static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ DBG("%s", omap_crtc->name);
+ dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+}
+
+static int omap_crtc_register_framedone_handler(
+ struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ return 0;
+}
+
+static void omap_crtc_unregister_framedone_handler(
+ struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+}
+
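+/* These hooks are installed via dss_install_mgr_ops() in omap_crtc_init(),
+ * so the DSS output drivers call back into the CRTC for the upstream part
+ * of the video pipe; only set_timings and set_lcd_config do any real work
+ * until command-mode panels are supported.
+ */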
+static const struct dss_mgr_ops mgr_ops = {
+ .start_update = omap_crtc_start_update,
+ .enable = omap_crtc_enable,
+ .disable = omap_crtc_disable,
+ .set_timings = omap_crtc_set_timings,
+ .set_lcd_config = omap_crtc_set_lcd_config,
+ .register_framedone_handler = omap_crtc_register_framedone_handler,
+ .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
+};
+
+/*
+ * CRTC funcs:
+ */
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s", omap_crtc->name);
+
+ WARN_ON(omap_crtc->apply_irq.registered);
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+ omap_crtc->plane->funcs->destroy(omap_crtc->plane);
+ drm_crtc_cleanup(crtc);
+
+ kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+ int i;
+
+ DBG("%s: %d", omap_crtc->name, mode);
+
+ if (enabled != omap_crtc->enabled) {
+ omap_crtc->enabled = enabled;
+ omap_crtc->full_update = true;
+ omap_crtc_apply(crtc, &omap_crtc->apply);
+
+ /* also enable our private plane: */
+ WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+ /* and any attached overlay planes: */
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+ if (plane->crtc == crtc)
+ WARN_ON(omap_plane_dpms(plane, mode));
+ }
+ }
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ omap_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ omap_crtc->full_update = true;
+
+ return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ NULL, NULL);
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->name);
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->name);
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_plane *plane = omap_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+
+ return omap_plane_mode_set(plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ NULL, NULL);
+}
+
+static void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vblank_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* wakeup userspace */
+ if (omap_crtc->event)
+ drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
+
+ omap_crtc->event = NULL;
+ omap_crtc->old_fb = NULL;
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void page_flip_worker(struct work_struct *work)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, page_flip_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_display_mode *mode = &crtc->mode;
+ struct drm_gem_object *bo;
+
+ mutex_lock(&crtc->mutex);
+ omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ crtc->x << 16, crtc->y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ vblank_cb, crtc);
+ mutex_unlock(&crtc->mutex);
+
+ bo = omap_framebuffer_bo(crtc->fb, 0);
+ drm_gem_object_unreference_unlocked(bo);
+}
+
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+
+ /* avoid assumptions about what ctxt we are called from: */
+ queue_work(priv->wq, &omap_crtc->page_flip_work);
+}
+
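+/* Page flip sequence: omap_crtc_page_flip_locked() takes a temporary
+ * reference on the new fb's bo and registers an async completion via
+ * omap_gem_op_async(); page_flip_cb() then queues page_flip_worker(),
+ * which reprograms the plane (passing vblank_cb() as callback) and drops
+ * the temporary reference; vblank_cb() finally sends the vblank event to
+ * userspace.
+ */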
+static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_gem_object *bo;
+
+ DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+ fb->base.id, event);
+
+ if (omap_crtc->old_fb) {
+ dev_err(dev->dev, "already a pending flip\n");
+ return -EINVAL;
+ }
+
+ omap_crtc->event = event;
+ crtc->fb = fb;
+
+ /*
+ * Hold a reference temporarily until the crtc is updated
+ * and takes the reference to the bo. This avoids it
+ * getting freed from under us:
+ */
+ bo = omap_framebuffer_bo(fb, 0);
+ drm_gem_object_reference(bo);
+
+ omap_gem_op_async(bo, OMAP_GEM_READ, page_flip_cb, crtc);
+
+ return 0;
+}
+
+static int omap_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+
+ if (property == priv->rotation_prop) {
+ crtc->invert_dimensions =
+ !!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
+ }
+
+ return omap_plane_set_property(omap_crtc->plane, property, val);
+}
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = omap_crtc_destroy,
+ .page_flip = omap_crtc_page_flip_locked,
+ .set_property = omap_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+ .dpms = omap_crtc_dpms,
+ .mode_fixup = omap_crtc_mode_fixup,
+ .mode_set = omap_crtc_mode_set,
+ .prepare = omap_crtc_prepare,
+ .commit = omap_crtc_commit,
+ .mode_set_base = omap_crtc_mode_set_base,
+ .load_lut = omap_crtc_load_lut,
+};
+
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->channel;
+}
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, error_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+ /* avoid getting in a flood, unregister the irq until next vblank */
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, apply_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+
+ if (!omap_crtc->error_irq.registered)
+ omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+
+ if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+ struct omap_drm_private *priv =
+ crtc->dev->dev_private;
+ DBG("%s: apply done", omap_crtc->name);
+ omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, apply_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct omap_drm_apply *apply, *n;
+ bool need_apply;
+
+ /*
+ * Synchronize everything on mode_config.mutex, to keep
+ * the callbacks and list modification all serialized
+ * with respect to modesetting ioctls from userspace.
+ */
+ mutex_lock(&crtc->mutex);
+ dispc_runtime_get();
+
+ /*
+ * If we are still pending a previous update, wait.. when the
+ * pending update completes, we get kicked again.
+ */
+ if (omap_crtc->apply_irq.registered)
+ goto out;
+
+ /* finish up previous apply's: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->pending_applies, pending_node) {
+ apply->post_apply(apply);
+ list_del(&apply->pending_node);
+ }
+
+ need_apply = !list_empty(&omap_crtc->queued_applies);
+
+ /* then handle the next round of queued apply's: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->queued_applies, queued_node) {
+ apply->pre_apply(apply);
+ list_del(&apply->queued_node);
+ apply->queued = false;
+ list_add_tail(&apply->pending_node,
+ &omap_crtc->pending_applies);
+ }
+
+ if (need_apply) {
+ enum omap_channel channel = omap_crtc->channel;
+
+ DBG("%s: GO", omap_crtc->name);
+
+ if (dispc_mgr_is_enabled(channel)) {
+ omap_irq_register(dev, &omap_crtc->apply_irq);
+ dispc_mgr_go(channel);
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+ }
+
+out:
+ dispc_runtime_put();
+ mutex_unlock(&crtc->mutex);
+}
+
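+/* omap_crtc_apply() queues an apply and kicks apply_worker() if nothing is
+ * pending; the worker pre_apply()s queued entries, requests a GO on the
+ * manager and registers apply_irq, and once the GO bit clears it is queued
+ * again to post_apply() the now-completed entries.
+ */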
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+ /* no need to queue it again if it is already queued: */
+ if (apply->queued)
+ return 0;
+
+ apply->queued = true;
+ list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+ /*
+ * If there are no currently pending updates, then go ahead and
+ * kick the worker immediately, otherwise it will run again when
+ * the current update finishes.
+ */
+ if (list_empty(&omap_crtc->pending_applies)) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+
+ return 0;
+}
+
+/* called only from apply */
+static void set_enabled(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ enum omap_channel channel = omap_crtc->channel;
+ struct omap_irq_wait *wait = NULL;
+
+ if (dispc_mgr_is_enabled(channel) == enable)
+ return;
+
+ /* ignore sync-lost irqs during enable/disable */
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+ if (dispc_mgr_get_framedone_irq(channel)) {
+ if (!enable) {
+ wait = omap_irq_wait_init(dev,
+ dispc_mgr_get_framedone_irq(channel), 1);
+ }
+ } else {
+ /*
+ * When we disable digit output, we need to wait until fields
+ * are done. Otherwise the DSS is still working, and turning
+ * off the clocks prevents DSS from going to OFF mode. And when
+ * enabling, we need to wait for the extra sync lost irqs
+ */
+ wait = omap_irq_wait_init(dev,
+ dispc_mgr_get_vsync_irq(channel), 2);
+ }
+
+ dispc_mgr_enable(channel, enable);
+
+ if (wait) {
+ int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+ if (ret) {
+ dev_err(dev->dev, "%s: timeout waiting for %s\n",
+ omap_crtc->name, enable ? "enable" : "disable");
+ }
+ }
+
+ omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(apply, struct omap_crtc, apply);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_encoder *encoder = NULL;
+
+ DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+ omap_crtc->enabled, omap_crtc->full_update);
+
+ if (omap_crtc->full_update) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ int i;
+ for (i = 0; i < priv->num_encoders; i++) {
+ if (priv->encoders[i]->crtc == crtc) {
+ encoder = priv->encoders[i];
+ break;
+ }
+ }
+ }
+
+ if (!omap_crtc->enabled) {
+ set_enabled(&omap_crtc->base, false);
+ if (encoder)
+ omap_encoder_set_enabled(encoder, false);
+ } else {
+ if (encoder) {
+ omap_encoder_set_enabled(encoder, false);
+ omap_encoder_update(encoder, &omap_crtc->mgr,
+ &omap_crtc->timings);
+ omap_encoder_set_enabled(encoder, true);
+ omap_crtc->full_update = false;
+ }
+
+ dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+ dispc_mgr_set_timings(omap_crtc->channel,
+ &omap_crtc->timings);
+ set_enabled(&omap_crtc->base, true);
+ }
+
+ omap_crtc->full_update = false;
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+ /* nothing needed for post-apply */
+}
+
+static const char *channel_names[] = {
+ [OMAP_DSS_CHANNEL_LCD] = "lcd",
+ [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+ [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+};
+
+/* initialize crtc */
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, enum omap_channel channel, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct omap_crtc *omap_crtc;
+ struct omap_overlay_manager_info *info;
+
+ DBG("%s", channel_names[channel]);
+
+ omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+
+ if (!omap_crtc) {
+ dev_err(dev->dev, "could not allocate CRTC\n");
+ goto fail;
+ }
+
+ crtc = &omap_crtc->base;
+
+ INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+ INIT_WORK(&omap_crtc->apply_work, apply_worker);
+
+ INIT_LIST_HEAD(&omap_crtc->pending_applies);
+ INIT_LIST_HEAD(&omap_crtc->queued_applies);
+
+ omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
+ omap_crtc->apply.post_apply = omap_crtc_post_apply;
+
+ omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+ omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+
+ omap_crtc->error_irq.irqmask =
+ dispc_mgr_get_sync_lost_irq(channel);
+ omap_crtc->error_irq.irq = omap_crtc_error_irq;
+ omap_irq_register(dev, &omap_crtc->error_irq);
+
+ omap_crtc->channel = channel;
+ omap_crtc->plane = plane;
+ omap_crtc->plane->crtc = crtc;
+ omap_crtc->name = channel_names[channel];
+ omap_crtc->pipe = id;
+
+ /* temporary: */
+ omap_crtc->mgr.id = channel;
+
+ dss_install_mgr_ops(&mgr_ops);
+
+ /* TODO: fix hard-coded setup.. add properties! */
+ info = &omap_crtc->info;
+ info->default_color = 0x00000000;
+ info->trans_key = 0x00000000;
+ info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ info->trans_enabled = false;
+
+ drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+ omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+
+ return crtc;
+
+fail:
+ if (crtc)
+ omap_crtc_destroy(crtc);
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
new file mode 100644
index 00000000000..c27f59da7f2
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -0,0 +1,125 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_debugfs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#include "drm_fb_helper.h"
+
+
+#ifdef CONFIG_DEBUG_FS
+
+static int gem_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "All Objects:\n");
+ omap_gem_describe_objects(&priv->obj_list, m);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int mm_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int fb_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb;
+
+ seq_printf(m, "fbcon ");
+ omap_framebuffer_describe(priv->fbdev->fb, m);
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == priv->fbdev->fb)
+ continue;
+
+ seq_printf(m, "user ");
+ omap_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+/* list of debugfs files that are applicable to all devices */
+static struct drm_info_list omap_debugfs_list[] = {
+ {"gem", gem_show, 0},
+ {"mm", mm_show, 0},
+ {"fb", fb_show, 0},
+};
+
+/* list of debugfs files that are specific to devices with dmm/tiler */
+static struct drm_info_list omap_dmm_debugfs_list[] = {
+ {"tiler_map", tiler_map_show, 0},
+};
+
+int omap_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install omap_debugfs_list\n");
+ return ret;
+ }
+
+ if (dmm_is_available())
+ ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+void omap_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list), minor);
+ if (dmm_is_available())
+ drm_debugfs_remove_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list), minor);
+}
+
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
new file mode 100644
index 00000000000..58bcd6ae025
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -0,0 +1,188 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_PRIV_H
+#define OMAP_DMM_PRIV_H
+
+#define DMM_REVISION 0x000
+#define DMM_HWINFO 0x004
+#define DMM_LISA_HWINFO 0x008
+#define DMM_DMM_SYSCONFIG 0x010
+#define DMM_LISA_LOCK 0x01C
+#define DMM_LISA_MAP__0 0x040
+#define DMM_LISA_MAP__1 0x044
+#define DMM_TILER_HWINFO 0x208
+#define DMM_TILER_OR__0 0x220
+#define DMM_TILER_OR__1 0x224
+#define DMM_PAT_HWINFO 0x408
+#define DMM_PAT_GEOMETRY 0x40C
+#define DMM_PAT_CONFIG 0x410
+#define DMM_PAT_VIEW__0 0x420
+#define DMM_PAT_VIEW__1 0x424
+#define DMM_PAT_VIEW_MAP__0 0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI 0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS 0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0 0x4C0
+#define DMM_PAT_STATUS__1 0x4C4
+#define DMM_PAT_STATUS__2 0x4C8
+#define DMM_PAT_STATUS__3 0x4CC
+#define DMM_PAT_DESCR__0 0x500
+#define DMM_PAT_DESCR__1 0x510
+#define DMM_PAT_DESCR__2 0x520
+#define DMM_PAT_DESCR__3 0x530
+#define DMM_PEG_HWINFO 0x608
+#define DMM_PEG_PRIO 0x620
+#define DMM_PEG_PRIO_PAT 0x640
+
+#define DMM_IRQSTAT_DST (1<<0)
+#define DMM_IRQSTAT_LST (1<<1)
+#define DMM_IRQSTAT_ERR_INV_DSC (1<<2)
+#define DMM_IRQSTAT_ERR_INV_DATA (1<<3)
+#define DMM_IRQSTAT_ERR_UPD_AREA (1<<4)
+#define DMM_IRQSTAT_ERR_UPD_CTRL (1<<5)
+#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
+#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
+
+#define DMM_IRQSTAT_ERR_MASK (DMM_IRQSTAT_ERR_INV_DSC | \
+ DMM_IRQSTAT_ERR_INV_DATA | \
+ DMM_IRQSTAT_ERR_UPD_AREA | \
+ DMM_IRQSTAT_ERR_UPD_CTRL | \
+ DMM_IRQSTAT_ERR_UPD_DATA | \
+ DMM_IRQSTAT_ERR_LUT_MISS)
+
+#define DMM_PATSTATUS_READY (1<<0)
+#define DMM_PATSTATUS_VALID (1<<1)
+#define DMM_PATSTATUS_RUN (1<<2)
+#define DMM_PATSTATUS_DONE (1<<3)
+#define DMM_PATSTATUS_LINKED (1<<4)
+#define DMM_PATSTATUS_BYPASSED (1<<7)
+#define DMM_PATSTATUS_ERR_INV_DESCR (1<<10)
+#define DMM_PATSTATUS_ERR_INV_DATA (1<<11)
+#define DMM_PATSTATUS_ERR_UPD_AREA (1<<12)
+#define DMM_PATSTATUS_ERR_UPD_CTRL (1<<13)
+#define DMM_PATSTATUS_ERR_UPD_DATA (1<<14)
+#define DMM_PATSTATUS_ERR_ACCESS (1<<15)
+
+/* note: don't treat DMM_PATSTATUS_ERR_ACCESS as an error */
+#define DMM_PATSTATUS_ERR (DMM_PATSTATUS_ERR_INV_DESCR | \
+ DMM_PATSTATUS_ERR_INV_DATA | \
+ DMM_PATSTATUS_ERR_UPD_AREA | \
+ DMM_PATSTATUS_ERR_UPD_CTRL | \
+ DMM_PATSTATUS_ERR_UPD_DATA)
+
+
+
+enum {
+ PAT_STATUS,
+ PAT_DESCR
+};
+
+struct pat_ctrl {
+ u32 start:4;
+ u32 dir:4;
+ u32 lut_id:8;
+ u32 sync:12;
+ u32 ini:4;
+};
+
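+/* A PAT refill descriptor as consumed by the hardware: descriptors are
+ * chained through next_pa (see dmm_txn_append()), and the engine walks the
+ * chain once the first descriptor's physical address is written to
+ * DMM_PAT_DESCR__n in dmm_txn_commit().
+ */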
+struct pat {
+ uint32_t next_pa;
+ struct pat_area area;
+ struct pat_ctrl ctrl;
+ uint32_t data_pa;
+};
+
+#define DMM_FIXED_RETRY_COUNT 1000
+
+/* create refill buffer big enough to refill all slots, plus 3 descriptors..
+ * 3 descriptors is probably the worst-case for # of 2d-slices in a 1d area,
+ * but I guess you don't hit that worst case at the same time as full area
+ * refill
+ */
+#define DESCR_SIZE 128
+#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
+
+/* For OMAP5, a fixed offset is added to all Y coordinates for 1D buffers.
+ * This is used in programming to address the upper portion of the LUT
+ */
+#define OMAP5_LUT_OFFSET 128
+
+struct dmm;
+
+struct dmm_txn {
+ void *engine_handle;
+ struct tcm *tcm;
+
+ uint8_t *current_va;
+ dma_addr_t current_pa;
+
+ struct pat *last_pat;
+};
+
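+/* One PAT refill engine. Engines are kept on dmm->idle_head and handed out
+ * by dmm_txn_init(): callers sleep on engine_queue until engine_counter
+ * allows grabbing one, and release_engine() puts it back and wakes the next
+ * waiter (called from the IRQ handler for async commits).
+ */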
+struct refill_engine {
+ int id;
+ struct dmm *dmm;
+ struct tcm *tcm;
+
+ uint8_t *refill_va;
+ dma_addr_t refill_pa;
+
+ /* only one trans per engine for now */
+ struct dmm_txn txn;
+
+ bool async;
+
+ wait_queue_head_t wait_for_refill;
+
+ struct list_head idle_node;
+};
+
+struct dmm {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+
+ struct page *dummy_page;
+ dma_addr_t dummy_pa;
+
+ void *refill_va;
+ dma_addr_t refill_pa;
+
+ /* refill engines */
+ wait_queue_head_t engine_queue;
+ struct list_head idle_head;
+ struct refill_engine *engines;
+ int num_engines;
+ atomic_t engine_counter;
+
+ /* container information */
+ int container_width;
+ int container_height;
+ int lut_width;
+ int lut_height;
+ int num_lut;
+
+ /* array of LUT - TCM containers */
+ struct tcm **tcm;
+
+ /* allocation list and lock */
+ struct list_head alloc_head;
+};
+
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
new file mode 100644
index 00000000000..39102153710
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -0,0 +1,991 @@
+/*
+ * DMM IOMMU driver support functions for TI OMAP processors.
+ *
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/list.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_dmm_priv.h"
+
+#define DMM_DRIVER_NAME "dmm"
+
+/* mappings for associating views to luts */
+static struct tcm *containers[TILFMT_NFORMATS];
+static struct dmm *omap_dmm;
+
+/* global spinlock for protecting lists */
+static DEFINE_SPINLOCK(list_lock);
+
+/* Geometry table */
+#define GEOM(xshift, yshift, bytes_per_pixel) { \
+ .x_shft = (xshift), \
+ .y_shft = (yshift), \
+ .cpp = (bytes_per_pixel), \
+ .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
+ .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
+ }
+
+static const struct {
+ uint32_t x_shft; /* unused X-bits (as part of bpp) */
+ uint32_t y_shft; /* unused Y-bits (as part of bpp) */
+ uint32_t cpp; /* bytes/chars per pixel */
+ uint32_t slot_w; /* width of each slot (in pixels) */
+ uint32_t slot_h; /* height of each slot (in pixels) */
+} geom[TILFMT_NFORMATS] = {
+ [TILFMT_8BIT] = GEOM(0, 0, 1),
+ [TILFMT_16BIT] = GEOM(0, 1, 2),
+ [TILFMT_32BIT] = GEOM(1, 1, 4),
+ [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+};
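+
+/* With SLOT_WIDTH_BITS/SLOT_HEIGHT_BITS == 6, each 2D slot covers exactly
+ * one 4KiB page: 64x64x1 for 8bit, 64x32x2 for 16bit, 32x32x4 for 32bit;
+ * TILFMT_PAGE degenerates to 1x1 slots, i.e. one page per slot as well.
+ */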
+
+
+/* lookup table for registers w/ per-engine instances */
+static const uint32_t reg[][4] = {
+ [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+ DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+ [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+ DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+};
+
+/* simple allocator to grab next 16 byte aligned memory from txn */
+static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
+{
+ void *ptr;
+ struct refill_engine *engine = txn->engine_handle;
+
+ /* dmm programming requires 16 byte aligned addresses */
+ txn->current_pa = round_up(txn->current_pa, 16);
+ txn->current_va = (void *)round_up((long)txn->current_va, 16);
+
+ ptr = txn->current_va;
+ *pa = txn->current_pa;
+
+ txn->current_pa += sz;
+ txn->current_va += sz;
+
+ BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
+
+ return ptr;
+}
+
+/* check status and spin until wait_mask comes true */
+static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+{
+ struct dmm *dmm = engine->dmm;
+ uint32_t r = 0, err, i;
+
+ i = DMM_FIXED_RETRY_COUNT;
+ while (true) {
+ r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+ err = r & DMM_PATSTATUS_ERR;
+ if (err)
+ return -EFAULT;
+
+ if ((r & wait_mask) == wait_mask)
+ break;
+
+ if (--i == 0)
+ return -ETIMEDOUT;
+
+ udelay(1);
+ }
+
+ return 0;
+}
+
+static void release_engine(struct refill_engine *engine)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_add(&engine->idle_node, &omap_dmm->idle_head);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ atomic_inc(&omap_dmm->engine_counter);
+ wake_up_interruptible(&omap_dmm->engine_queue);
+}
+
+static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
+{
+ struct dmm *dmm = arg;
+ uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+ int i;
+
+ /* ack IRQ */
+ writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+
+ for (i = 0; i < dmm->num_engines; i++) {
+ if (status & DMM_IRQSTAT_LST) {
+ wake_up_interruptible(&dmm->engines[i].wait_for_refill);
+
+ if (dmm->engines[i].async)
+ release_engine(&dmm->engines[i]);
+ }
+
+ status >>= 8;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Get a handle for a DMM transaction
+ */
+static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+{
+ struct dmm_txn *txn = NULL;
+ struct refill_engine *engine = NULL;
+ int ret;
+ unsigned long flags;
+
+
+ /* wait until an engine is available */
+ ret = wait_event_interruptible(omap_dmm->engine_queue,
+ atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* grab an idle engine */
+ spin_lock_irqsave(&list_lock, flags);
+ if (!list_empty(&dmm->idle_head)) {
+ engine = list_entry(dmm->idle_head.next, struct refill_engine,
+ idle_node);
+ list_del(&engine->idle_node);
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ BUG_ON(!engine);
+
+ txn = &engine->txn;
+ engine->tcm = tcm;
+ txn->engine_handle = engine;
+ txn->last_pat = NULL;
+ txn->current_va = engine->refill_va;
+ txn->current_pa = engine->refill_pa;
+
+ return txn;
+}
+
+/**
+ * Add region to DMM transaction. If pages or pages[i] is NULL, then the
+ * corresponding slot is cleared (ie. dummy_pa is programmed)
+ */
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ struct page **pages, uint32_t npages, uint32_t roll)
+{
+ dma_addr_t pat_pa = 0;
+ uint32_t *data;
+ struct pat *pat;
+ struct refill_engine *engine = txn->engine_handle;
+ int columns = (1 + area->x1 - area->x0);
+ int rows = (1 + area->y1 - area->y0);
+ int i = columns*rows;
+
+ pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+
+ if (txn->last_pat)
+ txn->last_pat->next_pa = (uint32_t)pat_pa;
+
+ pat->area = *area;
+
+ /* adjust Y coordinates based off of container parameters */
+ pat->area.y0 += engine->tcm->y_offset;
+ pat->area.y1 += engine->tcm->y_offset;
+
+ pat->ctrl = (struct pat_ctrl){
+ .start = 1,
+ .lut_id = engine->tcm->lut_id,
+ };
+
+ data = alloc_dma(txn, 4*i, &pat->data_pa);
+
+ while (i--) {
+ int n = i + roll;
+ if (n >= npages)
+ n -= npages;
+ data[i] = (pages && pages[n]) ?
+ page_to_phys(pages[n]) : engine->dmm->dummy_pa;
+ }
+
+ txn->last_pat = pat;
+
+ return;
+}
+
+/**
+ * Commit the DMM transaction.
+ */
+static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+{
+ int ret = 0;
+ struct refill_engine *engine = txn->engine_handle;
+ struct dmm *dmm = engine->dmm;
+
+ if (!txn->last_pat) {
+ dev_err(engine->dmm->dev, "need at least one txn\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ txn->last_pat->next_pa = 0;
+
+ /* write to PAT_DESCR to clear out any pending transaction */
+ writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+
+ /* wait for engine ready: */
+ ret = wait_status(engine, DMM_PATSTATUS_READY);
+ if (ret) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* mark whether it is async to denote list management in IRQ handler */
+ engine->async = wait ? false : true;
+
+ /* kick reload */
+ writel(engine->refill_pa,
+ dmm->base + reg[PAT_DESCR][engine->id]);
+
+ if (wait) {
+ if (wait_event_interruptible_timeout(engine->wait_for_refill,
+ wait_status(engine, DMM_PATSTATUS_READY) == 0,
+ msecs_to_jiffies(1)) <= 0) {
+ dev_err(dmm->dev, "timed out waiting for done\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+cleanup:
+ /* only place engine back on list if we are done with it */
+ if (ret || wait)
+ release_engine(engine);
+
+ return ret;
+}
+
+/*
+ * DMM programming
+ */
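+/* fill() drives one complete transaction: grab an engine with
+ * dmm_txn_init(), append one PAT descriptor per 2D slice of the area with
+ * dmm_txn_append() (NULL pages program the dummy page), then
+ * dmm_txn_commit() kicks the refill and optionally waits for it.
+ */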
+static int fill(struct tcm_area *area, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret = 0;
+ struct tcm_area slice, area_s;
+ struct dmm_txn *txn;
+
+ txn = dmm_txn_init(omap_dmm, area->tcm);
+ if (IS_ERR_OR_NULL(txn))
+ return -ENOMEM;
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ struct pat_area p_area = {
+ .x0 = slice.p0.x, .y0 = slice.p0.y,
+ .x1 = slice.p1.x, .y1 = slice.p1.y,
+ };
+
+ dmm_txn_append(txn, &p_area, pages, npages, roll);
+
+ roll += tcm_sizeof(slice);
+ }
+
+ ret = dmm_txn_commit(txn, wait);
+
+ return ret;
+}
+
+/*
+ * Pin/unpin
+ */
+
+/* note: slots for which pages[i] == NULL are filled w/ dummy page
+ */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret;
+
+ ret = fill(&block->area, pages, npages, roll, wait);
+
+ if (ret)
+ tiler_unpin(block);
+
+ return ret;
+}
+
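+/* note: unpin just reprograms the area with the dummy page (pages == NULL);
+ * the tiler space itself stays reserved until tiler_release()
+ */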
+int tiler_unpin(struct tiler_block *block)
+{
+ return fill(&block->area, NULL, 0, 0, false);
+}
+
+/*
+ * Reserve/release
+ */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
+ uint16_t h, uint16_t align)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ u32 min_align = 128;
+ int ret;
+ unsigned long flags;
+
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ BUG_ON(!validfmt(fmt));
+
+ /* convert width/height to slots */
+ w = DIV_ROUND_UP(w, geom[fmt].slot_w);
+ h = DIV_ROUND_UP(h, geom[fmt].slot_h);
+
+ /* convert alignment to slots */
+ min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
+ align = ALIGN(align, min_align);
+ align /= geom[fmt].slot_w * geom[fmt].cpp;
+
+ block->fmt = fmt;
+
+ ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+ if (ret) {
+ kfree(block);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* add to allocation list */
+ spin_lock_irqsave(&list_lock, flags);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return block;
+}
+
+struct tiler_block *tiler_reserve_1d(size_t size)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long flags;
+
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ block->fmt = TILFMT_PAGE;
+
+ if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
+ &block->area)) {
+ kfree(block);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return block;
+}
+
+/* note: if you have pin'd pages, you should have already unpin'd first! */
+int tiler_release(struct tiler_block *block)
+{
+ int ret = tcm_free(&block->area);
+ unsigned long flags;
+
+ if (block->area.tcm)
+ dev_err(omap_dmm->dev, "failed to release block\n");
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_del(&block->alloc_node);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ kfree(block);
+ return ret;
+}
+
+/*
+ * Utils
+ */
+
+/* calculate the tiler space address of a pixel in a view orientation...
+ * below description copied from the display subsystem section of TRM:
+ *
+ * When the TILER is addressed, the bits:
+ * [28:27] = 0x0 for 8-bit tiled
+ * 0x1 for 16-bit tiled
+ * 0x2 for 32-bit tiled
+ * 0x3 for page mode
+ * [31:29] = 0x0 for 0-degree view
+ * 0x1 for 180-degree view + mirroring
+ * 0x2 for 0-degree view + mirroring
+ * 0x3 for 180-degree view
+ * 0x4 for 270-degree view + mirroring
+ * 0x5 for 270-degree view
+ * 0x6 for 90-degree view
+ * 0x7 for 90-degree view + mirroring
+ * Otherwise the bits indicate the corresponding bit address to access
+ * the SDRAM.
+ */
+static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
+{
+ u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+
+ /* validate coordinate */
+ x_mask = MASK(x_bits);
+ y_mask = MASK(y_bits);
+
+ if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
+ DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
+ x, x, x_mask, y, y, y_mask);
+ return 0;
+ }
+
+ /* account for mirroring */
+ if (orient & MASK_X_INVERT)
+ x ^= x_mask;
+ if (orient & MASK_Y_INVERT)
+ y ^= y_mask;
+
+ /* get coordinate address */
+ if (orient & MASK_XY_FLIP)
+ tmp = ((x << y_bits) + y);
+ else
+ tmp = ((y << x_bits) + x);
+
+ return TIL_ADDR((tmp << alignment), orient, fmt);
+}
+
+dma_addr_t tiler_ssptr(struct tiler_block *block)
+{
+ BUG_ON(!validfmt(block->fmt));
+
+ return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
+ block->area.p0.x * geom[block->fmt].slot_w,
+ block->area.p0.y * geom[block->fmt].slot_h);
+}
+
+dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
+ uint32_t x, uint32_t y)
+{
+ struct tcm_pt *p = &block->area.p0;
+ BUG_ON(!validfmt(block->fmt));
+
+ return tiler_get_address(block->fmt, orient,
+ (p->x * geom[block->fmt].slot_w) + x,
+ (p->y * geom[block->fmt].slot_h) + y);
+}
+
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+{
+ BUG_ON(!validfmt(fmt));
+ *w = round_up(*w, geom[fmt].slot_w);
+ *h = round_up(*h, geom[fmt].slot_h);
+}
+
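+/* The stride of a tiler view does not depend on the buffer width: it is a
+ * per-format power of two derived from the container geometry
+ * (CONT_WIDTH_BITS/CONT_HEIGHT_BITS), with XY-flipped orientations using
+ * the container height instead of the width.
+ */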
+uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
+{
+ BUG_ON(!validfmt(fmt));
+
+ if (orient & MASK_XY_FLIP)
+ return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
+ else
+ return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+}
+
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ tiler_align(fmt, &w, &h);
+ return geom[fmt].cpp * w * h;
+}
+
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ BUG_ON(!validfmt(fmt));
+ return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
+}
+
+bool dmm_is_available(void)
+{
+ return omap_dmm ? true : false;
+}
+
+static int omap_dmm_remove(struct platform_device *dev)
+{
+ struct tiler_block *block, *_block;
+ int i;
+ unsigned long flags;
+
+ if (omap_dmm) {
+ /* free all area regions */
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
+ alloc_node) {
+ list_del(&block->alloc_node);
+ kfree(block);
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ for (i = 0; i < omap_dmm->num_lut; i++)
+ if (omap_dmm->tcm && omap_dmm->tcm[i])
+ omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
+ kfree(omap_dmm->tcm);
+
+ kfree(omap_dmm->engines);
+ if (omap_dmm->refill_va)
+ dma_free_writecombine(omap_dmm->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ omap_dmm->refill_va,
+ omap_dmm->refill_pa);
+ if (omap_dmm->dummy_page)
+ __free_page(omap_dmm->dummy_page);
+
+ if (omap_dmm->irq > 0)
+ free_irq(omap_dmm->irq, omap_dmm);
+
+ iounmap(omap_dmm->base);
+ kfree(omap_dmm);
+ omap_dmm = NULL;
+ }
+
+ return 0;
+}
+
+static int omap_dmm_probe(struct platform_device *dev)
+{
+ int ret = -EFAULT, i;
+ struct tcm_area area = {0};
+ u32 hwinfo, pat_geom;
+ struct resource *mem;
+
+ omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
+ if (!omap_dmm) {
+ dev_err(&dev->dev, "failed to allocate driver data section\n");
+ goto fail;
+ }
+
+ /* initialize lists */
+ INIT_LIST_HEAD(&omap_dmm->alloc_head);
+ INIT_LIST_HEAD(&omap_dmm->idle_head);
+
+ init_waitqueue_head(&omap_dmm->engine_queue);
+
+ /* lookup hwmod data - base address and irq */
+ mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&dev->dev, "failed to get base address resource\n");
+ goto fail;
+ }
+
+ omap_dmm->base = ioremap(mem->start, SZ_2K);
+
+ if (!omap_dmm->base) {
+ dev_err(&dev->dev, "failed to get dmm base address\n");
+ goto fail;
+ }
+
+ omap_dmm->irq = platform_get_irq(dev, 0);
+ if (omap_dmm->irq < 0) {
+ dev_err(&dev->dev, "failed to get IRQ resource\n");
+ goto fail;
+ }
+
+ omap_dmm->dev = &dev->dev;
+
+ hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+ omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
+ omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
+ omap_dmm->container_width = 256;
+ omap_dmm->container_height = 128;
+
+ atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
+
+ /* read out actual LUT width and height */
+ pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+ omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
+ omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
+
+ /* increment LUT by one if on OMAP5 */
+ /* LUT has twice the height, and is split into a separate container */
+ if (omap_dmm->lut_height != omap_dmm->container_height)
+ omap_dmm->num_lut++;
+
+ /* initialize DMM registers */
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
+ writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
+ writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+
+ ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+ "omap_dmm_irq_handler", omap_dmm);
+
+ if (ret) {
+ dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
+ omap_dmm->irq, ret);
+ omap_dmm->irq = -1;
+ goto fail;
+ }
+
+ /* Enable all interrupts for each refill engine except
+ * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+ * about because we want to be able to refill live scanout
+ * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+ * we just generally don't care about.
+ */
+ writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+
+ omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+ if (!omap_dmm->dummy_page) {
+ dev_err(&dev->dev, "could not allocate dummy page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* set dma mask for device */
+ /* NOTE: this is a workaround for the hwmod not initializing properly */
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
+
+ /* alloc refill memory */
+ omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ &omap_dmm->refill_pa, GFP_KERNEL);
+ if (!omap_dmm->refill_va) {
+ dev_err(&dev->dev, "could not allocate refill memory\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* alloc engines */
+ omap_dmm->engines = kzalloc(
+ omap_dmm->num_engines * sizeof(struct refill_engine),
+ GFP_KERNEL);
+ if (!omap_dmm->engines) {
+ dev_err(&dev->dev, "could not allocate engines\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < omap_dmm->num_engines; i++) {
+ omap_dmm->engines[i].id = i;
+ omap_dmm->engines[i].dmm = omap_dmm;
+ omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
+ (REFILL_BUFFER_SIZE * i);
+ omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
+ (REFILL_BUFFER_SIZE * i);
+ init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+
+ list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
+ }
+
+ omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
+ GFP_KERNEL);
+ if (!omap_dmm->tcm) {
+ dev_err(&dev->dev, "failed to allocate lut ptrs\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* init containers */
+ /* Each LUT is associated with a TCM (container manager). The lut_id
+ stored in each TCM identifies the correct LUT to program during
+ refill operations */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
+ omap_dmm->container_height,
+ NULL);
+
+ if (!omap_dmm->tcm[i]) {
+ dev_err(&dev->dev, "failed to allocate container\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_dmm->tcm[i]->lut_id = i;
+ }
+
+ /* assign access mode containers to applicable tcm container */
+ /* OMAP 4 has 1 container for all 4 views */
+ /* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
+ containers[TILFMT_8BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_16BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_32BIT] = omap_dmm->tcm[0];
+
+ if (omap_dmm->container_height != omap_dmm->lut_height) {
+ /* second LUT is used for PAGE mode. Programming must use
+ y offset that is added to all y coordinates. LUT id is still
+ 0, because it is the same LUT, just the upper 128 lines */
+ containers[TILFMT_PAGE] = omap_dmm->tcm[1];
+ omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
+ omap_dmm->tcm[1]->lut_id = 0;
+ } else {
+ containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+ }
+
+ area = (struct tcm_area) {
+ .tcm = NULL,
+ .p1.x = omap_dmm->container_width - 1,
+ .p1.y = omap_dmm->container_height - 1,
+ };
+
+ /* initialize all LUTs to dummy page entries */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ area.tcm = omap_dmm->tcm[i];
+ if (fill(&area, NULL, 0, 0, true))
+ dev_err(omap_dmm->dev, "refill failed");
+ }
+
+ dev_info(omap_dmm->dev, "initialized all PAT entries\n");
+
+ return 0;
+
+fail:
+ if (omap_dmm_remove(dev))
+ dev_err(&dev->dev, "cleanup failed\n");
+ return ret;
+}
+
+/*
+ * debugfs support
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+static const char *special = ".,:;'\"`~!^-+";
+
+static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
+ char c, bool ovw)
+{
+ int x, y;
+ for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
+ for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
+ if (map[y][x] == ' ' || ovw)
+ map[y][x] = c;
+}
+
+static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
+ char c)
+{
+ map[p->y / ydiv][p->x / xdiv] = c;
+}
+
+static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
+{
+ return map[p->y / ydiv][p->x / xdiv];
+}
+
+static int map_width(int xdiv, int x0, int x1)
+{
+ return (x1 / xdiv) - (x0 / xdiv) + 1;
+}
+
+static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
+{
+ char *p = map[yd] + (x0 / xdiv);
+ int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
+ if (w >= 0) {
+ p += w;
+ while (*nice)
+ *p++ = *nice++;
+ }
+}
+
+static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
+ if (a->p0.y + 1 < a->p1.y) {
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
+ 256 - 1);
+ } else if (a->p0.y < a->p1.y) {
+ if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
+ text_map(map, xdiv, nice, a->p0.y / ydiv,
+ a->p0.x + xdiv, 256 - 1);
+ else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
+ text_map(map, xdiv, nice, a->p1.y / ydiv,
+ 0, a->p1.y - xdiv);
+ } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
+ text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
+ }
+}
+
+static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
+ if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
+ a->p0.x, a->p1.x);
+}
+
+int tiler_map_show(struct seq_file *s, void *arg)
+{
+ int xdiv = 2, ydiv = 1;
+ char **map = NULL, *global_map;
+ struct tiler_block *block;
+ struct tcm_area a, p;
+ int i;
+ const char *m2d = alphabet;
+ const char *a2d = special;
+ const char *m2dp = m2d, *a2dp = a2d;
+ char nice[128];
+ int h_adj;
+ int w_adj;
+ unsigned long flags;
+ int lut_idx;
+
+
+ if (!omap_dmm) {
+ /* early return if dmm/tiler device is not initialized */
+ return 0;
+ }
+
+ h_adj = omap_dmm->container_height / ydiv;
+ w_adj = omap_dmm->container_width / xdiv;
+
+ map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
+ global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+
+ if (!map || !global_map)
+ goto error;
+
+ for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
+ memset(map, 0, h_adj * sizeof(*map));
+ memset(global_map, ' ', (w_adj + 1) * h_adj);
+
+ for (i = 0; i < omap_dmm->container_height; i++) {
+ map[i] = global_map + i * (w_adj + 1);
+ map[i][w_adj] = 0;
+ }
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
+ if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
+ if (block->fmt != TILFMT_PAGE) {
+ fill_map(map, xdiv, ydiv, &block->area,
+ *m2dp, true);
+ if (!*++a2dp)
+ a2dp = a2d;
+ if (!*++m2dp)
+ m2dp = m2d;
+ map_2d_info(map, xdiv, ydiv, nice,
+ &block->area);
+ } else {
+ bool start = read_map_pt(map, xdiv,
+ ydiv, &block->area.p0) == ' ';
+ bool end = read_map_pt(map, xdiv, ydiv,
+ &block->area.p1) == ' ';
+
+ tcm_for_each_slice(a, block->area, p)
+ fill_map(map, xdiv, ydiv, &a,
+ '=', true);
+ fill_map_pt(map, xdiv, ydiv,
+ &block->area.p0,
+ start ? '<' : 'X');
+ fill_map_pt(map, xdiv, ydiv,
+ &block->area.p1,
+ end ? '>' : 'X');
+ map_1d_info(map, xdiv, ydiv, nice,
+ &block->area);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (s) {
+ seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
+ for (i = 0; i < 128; i++)
+ seq_printf(s, "%03d:%s\n", i, map[i]);
+ seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
+ } else {
+ dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
+ lut_idx);
+ for (i = 0; i < 128; i++)
+ dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
+ dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
+ lut_idx);
+ }
+ }
+
+error:
+ kfree(map);
+ kfree(global_map);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int omap_dmm_resume(struct device *dev)
+{
+ struct tcm_area area;
+ int i;
+
+ if (!omap_dmm)
+ return -ENODEV;
+
+ area = (struct tcm_area) {
+ .tcm = NULL,
+ .p1.x = omap_dmm->container_width - 1,
+ .p1.y = omap_dmm->container_height - 1,
+ };
+
+ /* initialize all LUTs to dummy page entries */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ area.tcm = omap_dmm->tcm[i];
+ if (fill(&area, NULL, 0, 0, true))
+ dev_err(dev, "refill failed");
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_dmm_pm_ops = {
+ .resume = omap_dmm_resume,
+};
+#endif
+
+struct platform_driver omap_dmm_driver = {
+ .probe = omap_dmm_probe,
+ .remove = omap_dmm_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DMM_DRIVER_NAME,
+#ifdef CONFIG_PM
+ .pm = &omap_dmm_pm_ops,
+#endif
+ },
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
+MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
+MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
new file mode 100644
index 00000000000..4fdd61e54bd
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_TILER_H
+#define OMAP_DMM_TILER_H
+
+#include "omap_drv.h"
+#include "tcm.h"
+
+enum tiler_fmt {
+ TILFMT_8BIT = 0,
+ TILFMT_16BIT,
+ TILFMT_32BIT,
+ TILFMT_PAGE,
+ TILFMT_NFORMATS
+};
+
+struct pat_area {
+ u32 x0:8;
+ u32 y0:8;
+ u32 x1:8;
+ u32 y1:8;
+};
+
+struct tiler_block {
+ struct list_head alloc_node; /* node for global block list */
+ struct tcm_area area; /* area */
+ enum tiler_fmt fmt; /* format */
+};
+
+/* bits representing the same slot in DMM-TILER hw-block */
+#define SLOT_WIDTH_BITS 6
+#define SLOT_HEIGHT_BITS 6
+
+/* bits reserved to describe coordinates in DMM-TILER hw-block */
+#define CONT_WIDTH_BITS 14
+#define CONT_HEIGHT_BITS 13
+
+/* calculated constants */
+#define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
+#define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
+#define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
+
+/*
+Table 15-11. Coding and Description of TILER Orientations
+S Y X Description Alternate description
+0 0 0 0-degree view Natural view
+0 0 1 0-degree view with vertical mirror 180-degree view with horizontal mirror
+0 1 0 0-degree view with horizontal mirror 180-degree view with vertical mirror
+0 1 1 180-degree view
+1 0 0 90-degree view with vertical mirror 270-degree view with horizontal mirror
+1 0 1 270-degree view
+1 1 0 90-degree view
+1 1 1 90-degree view with horizontal mirror 270-degree view with vertical mirror
+ */
+#define MASK_XY_FLIP (1 << 31)
+#define MASK_Y_INVERT (1 << 30)
+#define MASK_X_INVERT (1 << 29)
+#define SHIFT_ACC_MODE 27
+#define MASK_ACC_MODE 3
+
+#define MASK(bits) ((1 << (bits)) - 1)
+
+#define TILVIEW_8BIT 0x60000000u
+#define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE)
+#define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE)
+#define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE)
+#define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE)
+
+/* create tsptr by adding view orientation and access mode */
+#define TIL_ADDR(x, orient, a)\
+ ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
+
+#ifdef CONFIG_DEBUG_FS
+int tiler_map_show(struct seq_file *s, void *arg);
+#endif
+
+/* pin/unpin */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait);
+int tiler_unpin(struct tiler_block *block);
+
+/* reserve/release */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
+ uint16_t align);
+struct tiler_block *tiler_reserve_1d(size_t size);
+int tiler_release(struct tiler_block *block);
+
+/* utilities */
+dma_addr_t tiler_ssptr(struct tiler_block *block);
+dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
+ uint32_t x, uint32_t y);
+uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+bool dmm_is_available(void);
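+
+/* Typical usage sketch (error handling omitted): reserve tiler space, pin
+ * the backing pages, and scan out / map via the system-space address:
+ *
+ *   block = tiler_reserve_2d(TILFMT_32BIT, width, height, 0);
+ *   tiler_pin(block, pages, npages, 0, true);
+ *   paddr = tiler_ssptr(block);
+ *   ...
+ *   tiler_unpin(block);
+ *   tiler_release(block);
+ */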
+
+extern struct platform_driver omap_dmm_driver;
+
+/* GEM bo flags -> tiler fmt */
+static inline enum tiler_fmt gem2fmt(uint32_t flags)
+{
+ switch (flags & OMAP_BO_TILED) {
+ case OMAP_BO_TILED_8:
+ return TILFMT_8BIT;
+ case OMAP_BO_TILED_16:
+ return TILFMT_16BIT;
+ case OMAP_BO_TILED_32:
+ return TILFMT_32BIT;
+ default:
+ return TILFMT_PAGE;
+ }
+}
+
+static inline bool validfmt(enum tiler_fmt fmt)
+{
+ switch (fmt) {
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ case TILFMT_PAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
new file mode 100644
index 00000000000..9083538bd16
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -0,0 +1,610 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_drv.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "omap_dmm_tiler.h"
+
+#define DRIVER_NAME MODULE_NAME
+#define DRIVER_DESC "OMAP DRM"
+#define DRIVER_DATE "20110917"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
+
+MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
+module_param(num_crtc, int, 0600);
+
+/*
+ * mode config funcs
+ */
+
+/* Notes about mapping DSS and DRM entities:
+ * CRTC: overlay
+ * encoder: manager.. with some extension to allow one primary CRTC
+ * and zero or more video CRTC's to be mapped to one encoder?
+ * connector: dssdev.. manager can be attached/detached from different
+ * devices
+ */
+
+static void omap_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ DBG("dev=%p", dev);
+ if (priv->fbdev)
+ drm_fb_helper_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs omap_mode_config_funcs = {
+ .fb_create = omap_framebuffer_create,
+ .output_poll_changed = omap_fb_output_poll_changed,
+};
+
+static int get_connector_type(struct omap_dss_device *dssdev)
+{
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_HDMI:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!strcmp(dssdev->name, "dvi"))
+ return DRM_MODE_CONNECTOR_DVID;
+ /* fallthrough */
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static int omap_modeset_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_dss_device *dssdev = NULL;
+ int num_ovls = dss_feat_get_num_ovls();
+ int id;
+
+ drm_mode_config_init(dev);
+
+ omap_drm_irq_install(dev);
+
+ /*
+ * Create private planes and CRTCs for the last NUM_CRTCs overlay
+ * plus manager:
+ */
+ for (id = 0; id < min(num_crtc, num_ovls); id++) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = omap_plane_init(dev, id, true);
+ crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
+
+ priv->planes[id] = plane;
+ priv->num_planes++;
+ }
+
+ /*
+ * Create normal planes for the remaining overlays:
+ */
+ for (; id < num_ovls; id++) {
+ struct drm_plane *plane = omap_plane_init(dev, id, false);
+
+ BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+ priv->planes[priv->num_planes++] = plane;
+ }
+
+ for_each_dss_dev(dssdev) {
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ if (!dssdev->driver) {
+ dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+ dssdev->name);
+ continue;
+ }
+
+ if (!(dssdev->driver->get_timings ||
+ dssdev->driver->read_edid)) {
+ dev_warn(dev->dev, "%s driver does not support "
+ "get_timings or read_edid.. skipping it!\n",
+ dssdev->name);
+ continue;
+ }
+
+ encoder = omap_encoder_init(dev, dssdev);
+
+ if (!encoder) {
+ dev_err(dev->dev, "could not create encoder: %s\n",
+ dssdev->name);
+ return -ENOMEM;
+ }
+
+ connector = omap_connector_init(dev,
+ get_connector_type(dssdev), dssdev, encoder);
+
+ if (!connector) {
+ dev_err(dev->dev, "could not create connector: %s\n",
+ dssdev->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+ BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ /* figure out which crtc's we can connect the encoder to: */
+ encoder->possible_crtcs = 0;
+ for (id = 0; id < priv->num_crtcs; id++) {
+ enum omap_dss_output_id supported_outputs =
+ dss_feat_get_supported_outputs(pipe2chan(id));
+ if (supported_outputs & dssdev->output->id)
+ encoder->possible_crtcs |= (1 << id);
+ }
+ }
+
+ dev->mode_config.min_width = 32;
+ dev->mode_config.min_height = 32;
+
+ /* note: eventually will need some cpu_is_omapXYZ() type stuff here
+ * to fill in these limits properly on different OMAP generations..
+ */
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &omap_mode_config_funcs;
+
+ return 0;
+}
+
+static void omap_modeset_free(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
+
+/*
+ * drm ioctl funcs
+ */
+
+
+static int ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_omap_param *args = data;
+
+ DBG("%p: param=%llu", dev, args->param);
+
+ switch (args->param) {
+ case OMAP_PARAM_CHIPSET_ID:
+ args->value = priv->omaprev;
+ break;
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_set_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_param *args = data;
+
+ switch (args->param) {
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_new *args = data;
+ VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+ args->size.bytes, args->flags);
+ return omap_gem_new_handle(dev, file_priv, args->size,
+ args->flags, &args->handle);
+}
+
+static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = omap_gem_op_sync(obj, args->op);
+
+ if (!ret)
+ ret = omap_gem_op_start(obj, args->op);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ /* XXX flushy, flushy */
+ ret = 0;
+
+ if (!ret)
+ ret = omap_gem_op_finish(obj, args->op);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ args->size = omap_gem_mmap_size(obj);
+ args->offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+ DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
+/*
+ * drm driver funcs
+ */
+
+/**
+ * load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+static int dev_load(struct drm_device *dev, unsigned long flags)
+{
+ struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+ struct omap_drm_private *priv;
+ int ret;
+
+ DBG("load: dev=%p", dev);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "could not allocate priv\n");
+ return -ENOMEM;
+ }
+
+ priv->omaprev = pdata->omaprev;
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+
+ INIT_LIST_HEAD(&priv->obj_list);
+
+ omap_gem_init(dev);
+
+ ret = omap_modeset_init(dev);
+ if (ret) {
+ dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ dev->dev_private = NULL;
+ kfree(priv);
+ return ret;
+ }
+
+ ret = drm_vblank_init(dev, priv->num_crtcs);
+ if (ret)
+ dev_warn(dev->dev, "could not init vblank\n");
+
+ priv->fbdev = omap_fbdev_init(dev);
+ if (!priv->fbdev) {
+ dev_warn(dev->dev, "omap_fbdev_init failed\n");
+ /* well, limp along without an fbdev.. maybe X11 will work? */
+ }
+
+ /* store off drm_device for use in pm ops */
+ dev_set_drvdata(dev->dev, dev);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+}
+
+static int dev_unload(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+
+ DBG("unload: dev=%p", dev);
+
+ drm_kms_helper_poll_fini(dev);
+ drm_vblank_cleanup(dev);
+ omap_drm_irq_uninstall(dev);
+
+ omap_fbdev_free(dev);
+ omap_modeset_free(dev);
+ omap_gem_deinit(dev);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ kfree(dev->dev_private);
+ dev->dev_private = NULL;
+
+ dev_set_drvdata(dev->dev, NULL);
+
+ return 0;
+}
+
+static int dev_open(struct drm_device *dev, struct drm_file *file)
+{
+ file->driver_priv = NULL;
+
+ DBG("open: dev=%p, file=%p", dev, file);
+
+ return 0;
+}
+
+static int dev_firstopen(struct drm_device *dev)
+{
+ DBG("firstopen: dev=%p", dev);
+ return 0;
+}
+
+/**
+ * lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ */
+static void dev_lastclose(struct drm_device *dev)
+{
+ int i;
+
+ /* we don't support vga-switcheroo.. so just make sure the fbdev
+ * mode is active
+ */
+ struct omap_drm_private *priv = dev->dev_private;
+ int ret;
+
+ DBG("lastclose: dev=%p", dev);
+
+ if (priv->rotation_prop) {
+ /* need to restore default rotation state.. not sure
+ * if there is a cleaner way to restore properties to
+ * default state? Maybe a flag that properties should
+ * automatically be restored to default state on
+ * lastclose?
+ */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ drm_object_property_set_value(&priv->crtcs[i]->base,
+ priv->rotation_prop, 0);
+ }
+
+ for (i = 0; i < priv->num_planes; i++) {
+ drm_object_property_set_value(&priv->planes[i]->base,
+ priv->rotation_prop, 0);
+ }
+ }
+
+ drm_modeset_lock_all(dev);
+ ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ drm_modeset_unlock_all(dev);
+ if (ret)
+ DBG("failed to restore crtc mode");
+}
+
+static void dev_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("preclose: dev=%p", dev);
+}
+
+static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("postclose: dev=%p, file=%p", dev, file);
+}
+
+static const struct vm_operations_struct omap_gem_vm_ops = {
+ .fault = omap_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations omapdriver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = omap_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver omap_drm_driver = {
+ .driver_features =
+ DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .firstopen = dev_firstopen,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = omap_irq_enable_vblank,
+ .disable_vblank = omap_irq_disable_vblank,
+ .irq_preinstall = omap_irq_preinstall,
+ .irq_postinstall = omap_irq_postinstall,
+ .irq_uninstall = omap_irq_uninstall,
+ .irq_handler = omap_irq_handler,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = omap_debugfs_init,
+ .debugfs_cleanup = omap_debugfs_cleanup,
+#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = omap_gem_prime_export,
+ .gem_prime_import = omap_gem_prime_import,
+ .gem_init_object = omap_gem_init_object,
+ .gem_free_object = omap_gem_free_object,
+ .gem_vm_ops = &omap_gem_vm_ops,
+ .dumb_create = omap_gem_dumb_create,
+ .dumb_map_offset = omap_gem_dumb_map_offset,
+ .dumb_destroy = omap_gem_dumb_destroy,
+ .ioctls = ioctls,
+ .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+ .fops = &omapdriver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
+{
+ DBG("");
+ return 0;
+}
+
+static int pdev_resume(struct platform_device *device)
+{
+ DBG("");
+ return 0;
+}
+
+static void pdev_shutdown(struct platform_device *device)
+{
+ DBG("");
+}
+
+static int pdev_probe(struct platform_device *device)
+{
+ DBG("%s", device->name);
+ return drm_platform_init(&omap_drm_driver, device);
+}
+
+static int pdev_remove(struct platform_device *device)
+{
+ DBG("");
+ drm_platform_exit(&omap_drm_driver, device);
+
+ platform_driver_unregister(&omap_dmm_driver);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops omapdrm_pm_ops = {
+ .resume = omap_gem_resume,
+};
+#endif
+
+struct platform_driver pdev = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &omapdrm_pm_ops,
+#endif
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
+ .suspend = pdev_suspend,
+ .resume = pdev_resume,
+ .shutdown = pdev_shutdown,
+};
+
+static int __init omap_drm_init(void)
+{
+ DBG("init");
+ if (platform_driver_register(&omap_dmm_driver)) {
+ /* we can continue on without DMM.. so not fatal */
+ dev_err(NULL, "DMM registration failed\n");
+ }
+ return platform_driver_register(&pdev);
+}
+
+static void __exit omap_drm_fini(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&pdev);
+}
+
+/* need late_initcall() so we load after dss_driver's are loaded */
+late_initcall(omap_drm_init);
+module_exit(omap_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_DESCRIPTION("OMAP DRM Display Driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
new file mode 100644
index 00000000000..d4f997bb4ac
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -0,0 +1,333 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_drv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRV_H__
+#define __OMAP_DRV_H__
+
+#include <video/omapdss.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/omap_drm.h>
+#include <linux/platform_data/omap_drm.h>
+
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
+
+#define MODULE_NAME "omapdrm"
+
+/* max # of mapper-id's that can be assigned.. todo, come up with a better
+ * (but still inexpensive) way to store/access per-buffer mapper private
+ * data..
+ */
+#define MAX_MAPPERS 2
+
+/* parameters which describe (unrotated) coordinates of scanout within a fb: */
+struct omap_drm_window {
+ uint32_t rotation;
+ int32_t crtc_x, crtc_y; /* signed because can be offscreen */
+ uint32_t crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+};
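As a sketch of how such a window is typically filled in from a plane update (names mirror omap_plane_mode_set() declared below; fb and info are assumed locals, and the >> 16 shift follows drm core's 16.16 fixed-point convention for plane src coordinates):

    struct omap_drm_window win = {
        .crtc_x = crtc_x, .crtc_y = crtc_y,
        .crtc_w = crtc_w, .crtc_h = crtc_h,
        /* src coordinates arrive as 16.16 fixed point from drm core: */
        .src_x = src_x >> 16, .src_y = src_y >> 16,
        .src_w = src_w >> 16, .src_h = src_h >> 16,
    };

    omap_framebuffer_update_scanout(fb, &win, &info);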
+
+/* Once GO bit is set, we can't make further updates to shadowed registers
+ * until the GO bit is cleared. So various parts in the kms code that need
+ * to update shadowed registers queue up a pair of callbacks, pre_apply
+ * which is called before setting GO bit, and post_apply that is called
+ * after GO bit is cleared. The crtc manages the queuing, and everyone
+ * else goes thru omap_crtc_apply() using these callbacks so that the
+ * code which has to deal w/ GO bit state is centralized.
+ */
+struct omap_drm_apply {
+ struct list_head pending_node, queued_node;
+ bool queued;
+ void (*pre_apply)(struct omap_drm_apply *apply);
+ void (*post_apply)(struct omap_drm_apply *apply);
+};
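A minimal sketch of a hypothetical user of this apply mechanism (the my_* names and the embedding struct are invented for illustration; omap_crtc_apply() is declared further down in this header):

    struct my_update {
        struct omap_drm_apply apply;
        /* ... values to program into shadowed registers ... */
    };

    static void my_pre_apply(struct omap_drm_apply *apply)
    {
        struct my_update *u = container_of(apply, struct my_update, apply);
        /* called before the GO bit is set: write shadowed registers from u */
    }

    static void my_post_apply(struct omap_drm_apply *apply)
    {
        /* called after the GO bit has cleared: safe to release old state */
    }

    /* queue it; the crtc sequences the callbacks around the GO bit */
    u->apply.pre_apply = my_pre_apply;
    u->apply.post_apply = my_post_apply;
    omap_crtc_apply(crtc, &u->apply);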
+
+/* For transiently registering for different DSS irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct omap_drm_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
+};
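For illustration, a transient registration might look roughly like this (the error-handler names are hypothetical; DISPC_IRQ_OCP_ERR is assumed to be one of the omapdss irq bits):

    static void my_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
    {
        /* runs from the DSS irq handler for the bits in irqmask */
    }

    static struct omap_drm_irq error_irq = {
        .irqmask = DISPC_IRQ_OCP_ERR,
        .irq = my_error_irq,
    };

    omap_irq_register(dev, &error_irq);
    /* ... later, when no longer interested ... */
    omap_irq_unregister(dev, &error_irq);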
+
+/* For KMS code that needs to wait for a certain # of IRQs:
+ */
+struct omap_irq_wait;
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+ uint32_t irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout);
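For example, waiting for two vsyncs on a channel could be sketched as (channel is assumed; timeout is in jiffies):

    struct omap_irq_wait *wait =
        omap_irq_wait_init(dev, dispc_mgr_get_vsync_irq(channel), 2);
    /* ... kick off the update ... */
    omap_irq_wait(dev, wait, msecs_to_jiffies(100));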
+
+struct omap_drm_private {
+ uint32_t omaprev;
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[8];
+
+ unsigned int num_planes;
+ struct drm_plane *planes[8];
+
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+
+ struct drm_fb_helper *fbdev;
+
+ struct workqueue_struct *wq;
+
+ /* list of GEM objects: */
+ struct list_head obj_list;
+
+ bool has_dmm;
+
+ /* properties: */
+ struct drm_property *rotation_prop;
+ struct drm_property *zorder_prop;
+
+ /* irq handling: */
+ struct list_head irq_list; /* list of omap_drm_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ struct omap_drm_irq error_handler;
+};
+
+/* this should probably be in drm-core to standardize amongst drivers */
+#define DRM_ROTATE_0 0
+#define DRM_ROTATE_90 1
+#define DRM_ROTATE_180 2
+#define DRM_ROTATE_270 3
+#define DRM_REFLECT_X 4
+#define DRM_REFLECT_Y 5
+
+#ifdef CONFIG_DEBUG_FS
+int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_cleanup(struct drm_minor *minor);
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
+#endif
+
+#ifdef CONFIG_PM
+int omap_gem_resume(struct device *dev);
+#endif
+
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+void omap_irq_preinstall(struct drm_device *dev);
+int omap_irq_postinstall(struct drm_device *dev);
+void omap_irq_uninstall(struct drm_device *dev);
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
+int omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_free(struct drm_device *dev);
+
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply);
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, enum omap_channel channel, int id);
+
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+ int plane_id, bool private_plane);
+int omap_plane_dpms(struct drm_plane *plane, int mode);
+int omap_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ void (*fxn)(void *), void *arg);
+void omap_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+int omap_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_dss_device *dssdev);
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled);
+int omap_encoder_update(struct drm_encoder *encoder,
+ struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev,
+ struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector);
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h);
+
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings);
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode);
+
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+ uint32_t max_formats, enum omap_color_mode supported_modes);
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+ struct drm_framebuffer *b, void *arg,
+ void (*unpin)(void *arg, struct drm_gem_object *bo));
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
+ struct omap_drm_window *win, struct omap_overlay_info *info);
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from);
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h);
+
+void omap_gem_init(struct drm_device *dev);
+void omap_gem_deinit(struct drm_device *dev);
+
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+int omap_gem_init_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg);
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+ enum dma_data_direction dir);
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap);
+int omap_gem_put_paddr(struct drm_gem_object *obj);
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+ bool remap);
+int omap_gem_put_pages(struct drm_gem_object *obj);
+uint32_t omap_gem_flags(struct drm_gem_object *obj);
+int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
+ int x, int y, dma_addr_t *paddr);
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
+int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
+
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buffer);
+
+static inline int align_pitch(int pitch, int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* in case someone tries to feed us a completely bogus stride: */
+ pitch = max(pitch, width * bytespp);
+ /* PVR needs alignment to 8 pixels.. right now that is the most
+ * restrictive stride requirement..
+ */
+ return ALIGN(pitch, 8 * bytespp);
+}
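For instance (values assumed): a 1280-pixel-wide, 32 bpp surface with a bogus pitch of 0 gives bytespp = 4, pitch = max(0, 1280 * 4) = 5120, and ALIGN(5120, 32) = 5120, so:

    align_pitch(0, 1280, 32);  /* -> 5120 */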
+
+static inline enum omap_channel pipe2chan(int pipe)
+{
+ int num_mgrs = dss_feat_get_num_mgrs();
+
+ /*
+ * We usually don't want to create a CRTC for each manager,
+ * at least not until we have a way to expose private planes
+ * to userspace. Otherwise there would not be enough video
+ * pipes left for drm planes. The higher #'d managers tend
+ * to have more features so start in reverse order.
+ */
+ return num_mgrs - pipe - 1;
+}
+
+/* map crtc to vblank mask */
+static inline uint32_t pipe2vbl(int crtc)
+{
+ enum omap_channel channel = pipe2chan(crtc);
+ return dispc_mgr_get_vsync_irq(channel);
+}
+
+static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
+ if (priv->crtcs[i] == crtc)
+ return i;
+
+ BUG(); /* bogus CRTC ptr */
+ return -1;
+}
+
+/* should these be made into common util helpers?
+ */
+
+static inline int objects_lookup(struct drm_device *dev,
+ struct drm_file *filp, uint32_t pixel_format,
+ struct drm_gem_object **bos, uint32_t *handles)
+{
+ int i, n = drm_format_num_planes(pixel_format);
+
+ for (i = 0; i < n; i++) {
+ bos[i] = drm_gem_object_lookup(dev, filp, handles[i]);
+ if (!bos[i])
+ goto fail;
+
+ }
+
+ return 0;
+
+fail:
+ while (--i >= 0)
+ drm_gem_object_unreference_unlocked(bos[i]);
+
+ return -ENOENT;
+}
+
+#endif /* __OMAP_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
new file mode 100644
index 00000000000..7e1f2ab6537
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -0,0 +1,170 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_encoder.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include <linux/list.h>
+
+
+/*
+ * encoder funcs
+ */
+
+#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+
+/* The encoder and connector both map to same dssdev.. the encoder
+ * handles the 'active' parts, ie. anything the modifies the state
+ * of the hw, and the connector handles the 'read-only' parts, like
+ * detecting connection and reading edid.
+ */
+struct omap_encoder {
+ struct drm_encoder base;
+ struct omap_dss_device *dssdev;
+};
+
+static void omap_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(omap_encoder);
+}
+
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+ .destroy = omap_encoder_destroy,
+};
+
+/*
+ * The CRTC drm_crtc_helper_set_mode() doesn't really give us the right
+ * order.. the easiest way to work around this for now is to make all
+ * the encoder-helper's no-op's and have the omap_crtc code take care
+ * of the sequencing and call us in the right points.
+ *
+ * Eventually to handle connecting CRTCs to different encoders properly,
+ * either the CRTC helpers need to change or we need to replace
+ * drm_crtc_helper_set_mode(), but lets wait until atomic-modeset for
+ * that.
+ */
+
+static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void omap_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void omap_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void omap_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+ .dpms = omap_encoder_dpms,
+ .mode_fixup = omap_encoder_mode_fixup,
+ .mode_set = omap_encoder_mode_set,
+ .prepare = omap_encoder_prepare,
+ .commit = omap_encoder_commit,
+};
+
+/*
+ * Instead of relying on the helpers for modeset, the omap_crtc code
+ * calls these functions in the proper sequence.
+ */
+
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+
+ if (enabled) {
+ return dssdrv->enable(dssdev);
+ } else {
+ dssdrv->disable(dssdev);
+ return 0;
+ }
+}
+
+int omap_encoder_update(struct drm_encoder *encoder,
+ struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings)
+{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ int ret;
+
+ dssdev->output->manager = mgr;
+
+ ret = dssdrv->check_timings(dssdev, timings);
+ if (ret) {
+ dev_err(dev->dev, "could not set timings: %d\n", ret);
+ return ret;
+ }
+
+ dssdrv->set_timings(dssdev, timings);
+
+ return 0;
+}
+
+/* initialize encoder */
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_dss_device *dssdev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct omap_encoder *omap_encoder;
+
+ omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
+ if (!omap_encoder) {
+ dev_err(dev->dev, "could not allocate encoder\n");
+ goto fail;
+ }
+
+ omap_encoder->dssdev = dssdev;
+
+ encoder = &omap_encoder->base;
+
+ drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ omap_encoder_destroy(encoder);
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
new file mode 100644
index 00000000000..9d5f6f696c7
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -0,0 +1,472 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_fb.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * framebuffer funcs
+ */
+
+/* per-format info: */
+struct format {
+ enum omap_color_mode dss_format;
+ uint32_t pixel_format;
+ struct {
+ int stride_bpp; /* this times width is stride */
+ int sub_y; /* sub-sample in y dimension */
+ } planes[4];
+ bool yuv;
+};
+
+static const struct format formats[] = {
+ /* 16bpp [A]RGB: */
+ { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */
+ { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
+ { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
+ { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
+ { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
+ { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
+ { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
+ /* 24bpp RGB: */
+ { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */
+ /* 32bpp [A]RGB: */
+ { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
+ { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
+ { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
+ { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
+ /* YUV: */
+ { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true },
+ { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true },
+ { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
+};
+
+/* convert from overlay's pixel formats bitmask to an array of fourcc's */
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+ uint32_t max_formats, enum omap_color_mode supported_modes)
+{
+ uint32_t nformats = 0;
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
+ if (formats[i].dss_format & supported_modes)
+ pixel_formats[nformats++] = formats[i].pixel_format;
+
+ return nformats;
+}
+
+/* per-plane info for the fb: */
+struct plane {
+ struct drm_gem_object *bo;
+ uint32_t pitch;
+ uint32_t offset;
+ dma_addr_t paddr;
+};
+
+#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+
+struct omap_framebuffer {
+ struct drm_framebuffer base;
+ const struct format *format;
+ struct plane planes[4];
+};
+
+static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ return drm_gem_handle_create(file_priv,
+ omap_fb->planes[0].bo, handle);
+}
+
+static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ if (plane->bo)
+ drm_gem_object_unreference_unlocked(plane->bo);
+ }
+
+ kfree(omap_fb);
+}
+
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ int i;
+
+ for (i = 0; i < num_clips; i++) {
+ omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+ clips[i].x2 - clips[i].x1,
+ clips[i].y2 - clips[i].y1);
+ }
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+ .create_handle = omap_framebuffer_create_handle,
+ .destroy = omap_framebuffer_destroy,
+ .dirty = omap_framebuffer_dirty,
+};
+
+static uint32_t get_linear_addr(struct plane *plane,
+ const struct format *format, int n, int x, int y)
+{
+ uint32_t offset;
+
+ offset = plane->offset +
+ (x * format->planes[n].stride_bpp) +
+ (y * plane->pitch / format->planes[n].sub_y);
+
+ return plane->paddr + offset;
+}
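As a worked example (numbers assumed): for the first plane of an XRGB8888 fb (stride_bpp = 4, sub_y = 1) with a 5120-byte pitch, pixel (x = 10, y = 3) lands at plane->offset + 10 * 4 + 3 * 5120 = plane->offset + 15400 bytes past plane->paddr.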
+
+/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
+ */
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
+ struct omap_drm_window *win, struct omap_overlay_info *info)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ const struct format *format = omap_fb->format;
+ struct plane *plane = &omap_fb->planes[0];
+ uint32_t x, y, orient = 0;
+
+ info->color_mode = format->dss_format;
+
+ info->pos_x = win->crtc_x;
+ info->pos_y = win->crtc_y;
+ info->out_width = win->crtc_w;
+ info->out_height = win->crtc_h;
+ info->width = win->src_w;
+ info->height = win->src_h;
+
+ x = win->src_x;
+ y = win->src_y;
+
+ if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+ uint32_t w = win->src_w;
+ uint32_t h = win->src_h;
+
+ switch (win->rotation & 0xf) {
+ default:
+ dev_err(fb->dev->dev, "invalid rotation: %02x",
+ (uint32_t)win->rotation);
+ /* fallthru to default to no rotation */
+ case 0:
+ case BIT(DRM_ROTATE_0):
+ orient = 0;
+ break;
+ case BIT(DRM_ROTATE_90):
+ orient = MASK_XY_FLIP | MASK_X_INVERT;
+ break;
+ case BIT(DRM_ROTATE_180):
+ orient = MASK_X_INVERT | MASK_Y_INVERT;
+ break;
+ case BIT(DRM_ROTATE_270):
+ orient = MASK_XY_FLIP | MASK_Y_INVERT;
+ break;
+ }
+
+ if (win->rotation & BIT(DRM_REFLECT_X))
+ orient ^= MASK_X_INVERT;
+
+ if (win->rotation & BIT(DRM_REFLECT_Y))
+ orient ^= MASK_Y_INVERT;
+
+ /* adjust x,y offset for flip/invert: */
+ if (orient & MASK_XY_FLIP)
+ swap(w, h);
+ if (orient & MASK_Y_INVERT)
+ y += h - 1;
+ if (orient & MASK_X_INVERT)
+ x += w - 1;
+
+ omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr);
+ info->rotation_type = OMAP_DSS_ROT_TILER;
+ info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
+ } else {
+ info->paddr = get_linear_addr(plane, format, 0, x, y);
+ info->rotation_type = OMAP_DSS_ROT_DMA;
+ info->screen_width = plane->pitch;
+ }
+
+ /* convert to pixels: */
+ info->screen_width /= format->planes[0].stride_bpp;
+
+ if (format->dss_format == OMAP_DSS_COLOR_NV12) {
+ plane = &omap_fb->planes[1];
+
+ if (info->rotation_type == OMAP_DSS_ROT_TILER) {
+ WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
+ omap_gem_rotated_paddr(plane->bo, orient,
+ x/2, y/2, &info->p_uv_addr);
+ } else {
+ info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+ }
+ } else {
+ info->p_uv_addr = 0;
+ }
+}
+
+/* Unpin 'a' (if not NULL) and pin 'b' (if not NULL). Buffers to unpin
+ * are just pushed to the unpin fifo so that the caller can defer the
+ * actual unpin until vblank.
+ *
+ * Note if this fails (ie. something went very wrong!), all buffers are
+ * unpinned, and the caller disables the overlay. We could have tried
+ * to revert back to the previous set of pinned buffers but if things are
+ * hosed there is no guarantee that would succeed.
+ */
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+ struct drm_framebuffer *b, void *arg,
+ void (*unpin)(void *arg, struct drm_gem_object *bo))
+{
+ int ret = 0, i, na, nb;
+ struct omap_framebuffer *ofba = to_omap_framebuffer(a);
+ struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
+ uint32_t pinned_mask = 0;
+
+ na = a ? drm_format_num_planes(a->pixel_format) : 0;
+ nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+
+ for (i = 0; i < max(na, nb); i++) {
+ struct plane *pa, *pb;
+
+ pa = (i < na) ? &ofba->planes[i] : NULL;
+ pb = (i < nb) ? &ofbb->planes[i] : NULL;
+
+ if (pa)
+ unpin(arg, pa->bo);
+
+ if (pb && !ret) {
+ ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
+ if (!ret) {
+ omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
+ pinned_mask |= (1 << i);
+ }
+ }
+ }
+
+ if (ret) {
+ /* something went wrong.. unpin what has been pinned */
+ for (i = 0; i < nb; i++) {
+ if (pinned_mask & (1 << i)) {
+ struct plane *pb = &ofbb->planes[i];
+ unpin(arg, pb->bo);
+ }
+ }
+ }
+
+ return ret;
+}
+
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ if (p >= drm_format_num_planes(fb->pixel_format))
+ return NULL;
+ return omap_fb->planes[p].bo;
+}
+
+/* iterate thru all the connectors, returning ones that are attached
+ * to the same fb..
+ */
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from)
+{
+ struct drm_device *dev = fb->dev;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector *connector = from;
+
+ if (!from)
+ return list_first_entry(connector_list, typeof(*from), head);
+
+ list_for_each_entry_from(connector, connector_list, head) {
+ if (connector != from) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ if (crtc && crtc->fb == fb)
+ return connector;
+
+ }
+ }
+
+ return NULL;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h)
+{
+ struct drm_connector *connector = NULL;
+
+ VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+
+ while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
+ /* only consider connectors that are part of a chain */
+ if (connector->encoder && connector->encoder->crtc) {
+ /* TODO: maybe this should propagate thru the crtc who
+ * could do the coordinate translation..
+ */
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ int cx = max(0, x - crtc->x);
+ int cy = max(0, y - crtc->y);
+ int cw = w + (x - crtc->x) - cx;
+ int ch = h + (y - crtc->y) - cy;
+
+ omap_connector_flush(connector, cx, cy, cw, ch);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+ (char *)&fb->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, plane->offset, plane->pitch);
+ omap_gem_describe(plane->bo, m);
+ }
+}
+#endif
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *bos[4];
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = objects_lookup(dev, file, mode_cmd->pixel_format,
+ bos, mode_cmd->handles);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fb = omap_framebuffer_init(dev, mode_cmd, bos);
+ if (IS_ERR(fb)) {
+ int i, n = drm_format_num_planes(mode_cmd->pixel_format);
+ for (i = 0; i < n; i++)
+ drm_gem_object_unreference_unlocked(bos[i]);
+ return fb;
+ }
+ return fb;
+}
+
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+ struct omap_framebuffer *omap_fb;
+ struct drm_framebuffer *fb = NULL;
+ const struct format *format = NULL;
+ int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ (char *)&mode_cmd->pixel_format);
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixel_format == mode_cmd->pixel_format) {
+ format = &formats[i];
+ break;
+ }
+ }
+
+ if (!format) {
+ dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ (char *)&mode_cmd->pixel_format);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+ if (!omap_fb) {
+ dev_err(dev->dev, "could not allocate fb\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = &omap_fb->base;
+ omap_fb->format = format;
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ int size, pitch = mode_cmd->pitches[i];
+
+ if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
+ dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
+ pitch, mode_cmd->width * format->planes[i].stride_bpp);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ size = pitch * mode_cmd->height / format->planes[i].sub_y;
+
+ if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
+ dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
+ bos[i]->size - mode_cmd->offsets[i], size);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ plane->bo = bos[i];
+ plane->offset = mode_cmd->offsets[i];
+ plane->pitch = pitch;
+ plane->paddr = 0;
+ }
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ return fb;
+
+fail:
+ if (fb)
+ omap_framebuffer_destroy(fb);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
new file mode 100644
index 00000000000..f0033bd3e4a
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -0,0 +1,399 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_fbdev.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
+static bool ywrap_enabled = true;
+module_param_named(ywrap, ywrap_enabled, bool, 0644);
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+
+struct omap_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *bo;
+ bool ywrap_enabled;
+
+ /* for deferred dmm roll when getting called in atomic ctx */
+ struct work_struct work;
+};
+
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
+static struct drm_fb_helper *get_fb(struct fb_info *fbi);
+
+static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t res;
+
+ res = fb_sys_write(fbi, buf, count, ppos);
+ omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+
+ return res;
+}
+
+static void omap_fbdev_fillrect(struct fb_info *fbi,
+ const struct fb_fillrect *rect)
+{
+ sys_fillrect(fbi, rect);
+ omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void omap_fbdev_copyarea(struct fb_info *fbi,
+ const struct fb_copyarea *area)
+{
+ sys_copyarea(fbi, area);
+ omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+}
+
+static void omap_fbdev_imageblit(struct fb_info *fbi,
+ const struct fb_image *image)
+{
+ sys_imageblit(fbi, image);
+ omap_fbdev_flush(fbi, image->dx, image->dy,
+ image->width, image->height);
+}
+
+static void pan_worker(struct work_struct *work)
+{
+ struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
+ struct fb_info *fbi = fbdev->base.fbdev;
+ int npages;
+
+ /* DMM roll shifts in 4K pages: */
+ npages = fbi->fix.line_length >> PAGE_SHIFT;
+ omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
+}
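To make the page arithmetic concrete (numbers assumed): with the pitch aligned to PAGE_SIZE as omap_fbdev_create() does below, a line_length of 8192 bytes gives npages = 8192 >> PAGE_SHIFT = 2, so panning to yoffset = 100 rolls the DMM mapping by 200 pages.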
+
+static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+
+ if (!helper)
+ goto fallback;
+
+ if (!fbdev->ywrap_enabled)
+ goto fallback;
+
+ if (drm_can_sleep()) {
+ pan_worker(&fbdev->work);
+ } else {
+ struct omap_drm_private *priv = helper->dev->dev_private;
+ queue_work(priv->wq, &fbdev->work);
+ }
+
+ return 0;
+
+fallback:
+ return drm_fb_helper_pan_display(var, fbi);
+}
+
+static struct fb_ops omap_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = omap_fbdev_write,
+ .fb_fillrect = omap_fbdev_fillrect,
+ .fb_copyarea = omap_fbdev_copyarea,
+ .fb_imageblit = omap_fbdev_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = omap_fbdev_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int omap_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb = NULL;
+ union omap_gem_size gsize;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
+ dma_addr_t paddr;
+ int ret;
+
+ /* only doing ARGB32 since this is what is needed to alpha-blend
+ * with video overlays:
+ */
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 32;
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = align_pitch(
+ mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
+ mode_cmd.width, sizes->surface_bpp);
+
+ fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
+ if (fbdev->ywrap_enabled) {
+ /* need to align pitch to page size if using DMM scrolling */
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+ }
+
+ /* allocate backing bo */
+ gsize = (union omap_gem_size){
+ .bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
+ };
+ DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
+ fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
+ if (!fbdev->bo) {
+ dev_err(dev->dev, "failed to allocate buffer object\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference(fbdev->bo);
+ ret = PTR_ERR(fb);
+ goto fail;
+ }
+
+ /* note: this keeps the bo pinned.. which is perhaps not ideal,
+ * but is needed as long as we use fb_mmap() to mmap to userspace
+ * (since this happens using fix.smem_start). Possibly we could
+ * implement our own mmap using GEM mmap support to avoid this
+ * (non-tiled buffer doesn't need to be pinned for fbcon to write
+ * to it). Then we just need to be sure that we are able to re-
+ * pin it in case of an oops.
+ */
+ ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
+ if (ret) {
+ dev_err(dev->dev,
+ "could not map (paddr)! Skipping framebuffer alloc\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &omap_fb_ops;
+
+ strcpy(fbi->fix.id, MODULE_NAME);
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = omap_gem_vaddr(fbdev->bo);
+ fbi->screen_size = fbdev->bo->size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = fbdev->bo->size;
+
+ /* if we have DMM, then we can use it for scrolling by just
+ * shuffling pages around in DMM rather than doing sw blit.
+ */
+ if (fbdev->ywrap_enabled) {
+ DRM_INFO("Enabling DMM ywrap scrolling\n");
+ fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
+ fbi->fix.ywrapstep = 1;
+ }
+
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+
+ if (ret) {
+ if (fbi)
+ framebuffer_release(fbi);
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_remove(fb);
+ }
+ }
+
+ return ret;
+}
+
+static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+}
+
+static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+}
+
+static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+ .gamma_set = omap_crtc_fb_gamma_set,
+ .gamma_get = omap_crtc_fb_gamma_get,
+ .fb_probe = omap_fbdev_create,
+};
+
+static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+{
+ if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+ /* these are not the fb's you're looking for */
+ return NULL;
+ }
+ return fbi->par;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+
+ if (!helper)
+ return;
+
+ VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+
+ omap_framebuffer_flush(helper->fb, x, y, w, h);
+}
+
+/* initialize fbdev helper */
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "could not allocate fbdev\n");
+ goto fail;
+ }
+
+ INIT_WORK(&fbdev->work, pan_worker);
+
+ helper = &fbdev->base;
+
+ helper->funcs = &omap_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper,
+ priv->num_crtcs, priv->num_connectors);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void omap_fbdev_free(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct omap_fbdev *fbdev;
+ struct fb_info *fbi;
+
+ DBG();
+
+ fbi = helper->fbdev;
+
+ /* only cleanup framebuffer if it is present */
+ if (fbi) {
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+ }
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_omap_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
+ drm_framebuffer_remove(fbdev->fb);
+ }
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
new file mode 100644
index 00000000000..e8302b02691
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -0,0 +1,1511 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_gem.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+/* remove these once drm core helpers are merged */
+struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+/*
+ * GEM buffer object implementation.
+ */
+
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
+
+/* note: we use upper 8 bits of flags for driver-internal flags: */
+#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
+#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
+#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
+
+
+struct omap_gem_object {
+ struct drm_gem_object base;
+
+ struct list_head mm_list;
+
+ uint32_t flags;
+
+ /** width/height for tiled formats (rounded up to slot boundaries) */
+ uint16_t width, height;
+
+ /** roll applied when mapping to DMM */
+ uint32_t roll;
+
+ /**
+ * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
+ * is set and the paddr is valid. Also if the buffer is remapped in
+ * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
+ * the physical address and OMAP_BO_DMA is not set, then you should
+ * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
+ * not removed from under your feet.
+ *
+ * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
+ * buffer is requested, but it doesn't guarantee that it is. Use the
+ * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
+ * physical address.
+ */
+ dma_addr_t paddr;
+
+ /**
+ * # of users of paddr
+ */
+ uint32_t paddr_cnt;
+
+ /**
+ * tiler block used when buffer is remapped in DMM/TILER.
+ */
+ struct tiler_block *block;
+
+ /**
+ * Array of backing pages, if allocated. Note that pages are never
+ * allocated for buffers originally allocated from contiguous memory
+ */
+ struct page **pages;
+
+ /** addresses corresponding to pages in above array */
+ dma_addr_t *addrs;
+
+ /**
+ * Virtual address, if mapped.
+ */
+ void *vaddr;
+
+ /**
+ * sync-object allocated on demand (if needed)
+ *
+ * Per-buffer sync-object for tracking pending and completed hw/dma
+ * read and write operations. The layout in memory is dictated by
+ * the SGX firmware, which uses this information to stall the command
+ * stream if a surface is not ready yet.
+ *
+ * Note that when buffer is used by SGX, the sync-object needs to be
+ * allocated from a special heap of sync-objects. This way many sync
+ * objects can be packed in a page, and not waste GPU virtual address
+ * space. Because of this we have to have a omap_gem_set_sync_object()
+ * API to allow replacement of the syncobj after it has (potentially)
+ * already been allocated. A bit ugly but I haven't thought of a
+ * better alternative.
+ */
+ struct {
+ uint32_t write_pending;
+ uint32_t write_complete;
+ uint32_t read_pending;
+ uint32_t read_complete;
+ } *sync;
+};
+
+static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+static uint64_t mmap_offset(struct drm_gem_object *obj);
+
+/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
+ * not necessarily pinned in TILER all the time, and (b) when they are
+ * they are not necessarily page aligned, we reserve one or more small
+ * regions in each of the 2d containers to use as a user-GART where we
+ * can create a second page-aligned mapping of parts of the buffer
+ * being accessed from userspace.
+ *
+ * Note that we could optimize slightly when we know that multiple
+ * tiler containers are backed by the same PAT.. but I'll leave that
+ * for later..
+ */
+#define NUM_USERGART_ENTRIES 2
+struct usergart_entry {
+ struct tiler_block *block; /* the reserved tiler block */
+ dma_addr_t paddr;
+ struct drm_gem_object *obj; /* the current pinned obj */
+ pgoff_t obj_pgoff; /* page offset of obj currently
+ mapped in */
+};
+static struct {
+ struct usergart_entry entry[NUM_USERGART_ENTRIES];
+ int height; /* height in rows */
+ int height_shift; /* ilog2(height in rows) */
+ int slot_shift; /* ilog2(width per slot) */
+ int stride_pfn; /* stride in pages */
+ int last; /* index of last used entry */
+} *usergart;
+
+static void evict_entry(struct drm_gem_object *obj,
+ enum tiler_fmt fmt, struct usergart_entry *entry)
+{
+ if (obj->dev->dev_mapping) {
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int n = usergart[fmt].height;
+ size_t size = PAGE_SIZE * n;
+ loff_t off = mmap_offset(obj) +
+ (entry->obj_pgoff << PAGE_SHIFT);
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ if (m > 1) {
+ int i;
+ /* if stride > PAGE_SIZE then sparse mapping: */
+ for (i = n; i > 0; i--) {
+ unmap_mapping_range(obj->dev->dev_mapping,
+ off, PAGE_SIZE, 1);
+ off += PAGE_SIZE * m;
+ }
+ } else {
+ unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ }
+ }
+
+ entry->obj = NULL;
+}
+
+/* Evict a buffer from usergart, if it is mapped there */
+static void evict(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ int i;
+
+ if (!usergart)
+ return;
+
+ for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
+ struct usergart_entry *entry = &usergart[fmt].entry[i];
+ if (entry->obj == obj)
+ evict_entry(obj, fmt, entry);
+ }
+ }
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time, in order to reduce
+ * pressure on TILER/DMM space, even when we know at allocation time that
+ * the buffer will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+ return obj->filp != NULL;
+}
+
+/**
+ * shmem buffers that are mapped cached can simulate coherency via using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ return is_shmem(obj) &&
+ ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}
+
+static DEFINE_SPINLOCK(sync_lock);
+
+/** ensure backing pages are allocated */
+static int omap_gem_attach_pages(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct page **pages;
+ int npages = obj->size >> PAGE_SHIFT;
+ int i, ret;
+ dma_addr_t *addrs;
+
+ WARN_ON(omap_obj->pages);
+
+ /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
+ * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
+ * we actually want CMA memory for it all anyways..
+ */
+ pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
+ return PTR_ERR(pages);
+ }
+
+ /* for non-cached buffers, ensure the new pages are clean because
+ * DSS, GPU, etc. are not cache coherent:
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ if (!addrs) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ for (i = 0; i < npages; i++) {
+ addrs[i] = dma_map_page(dev->dev, pages[i],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ } else {
+ addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ if (!addrs) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+ }
+
+ omap_obj->addrs = addrs;
+ omap_obj->pages = pages;
+
+ return 0;
+
+free_pages:
+ _drm_gem_put_pages(obj, pages, true, false);
+
+ return ret;
+}
+
+/** release backing pages */
+static void omap_gem_detach_pages(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ /* for non-cached buffers, ensure the new pages are clean because
+ * DSS, GPU, etc. are not cache coherent:
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ }
+
+ kfree(omap_obj->addrs);
+ omap_obj->addrs = NULL;
+
+ _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+ omap_obj->pages = NULL;
+}
+
+/* get buffer flags */
+uint32_t omap_gem_flags(struct drm_gem_object *obj)
+{
+ return to_omap_bo(obj)->flags;
+}
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->map_list.map) {
+ /* Make it mmapable */
+ size_t size = omap_gem_mmap_size(obj);
+ int ret = _drm_gem_create_mmap_offset_size(obj, size);
+
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
+ }
+ }
+
+ return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ uint64_t offset;
+ mutex_lock(&obj->dev->struct_mutex);
+ offset = mmap_offset(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return offset;
+}
+
+/** get mmap size */
+size_t omap_gem_mmap_size(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ size_t size = obj->size;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ /* for tiled buffers, the virtual size has stride rounded up
+ * to 4kb.. (to hide the fact that row n+1 might start 16kb or
+ * 32kb later!). But we don't back the entire buffer with
+ * pages, only the valid picture part.. so need to adjust for
+ * this in the size used to mmap and generate mmap offset
+ */
+ size = tiler_vsize(gem2fmt(omap_obj->flags),
+ omap_obj->width, omap_obj->height);
+ }
+
+ return size;
+}
+
+/* get tiled size, returns -EINVAL if not tiled buffer */
+int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ *w = omap_obj->width;
+ *h = omap_obj->height;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/* Normal handling for the case of faulting in non-tiled buffers */
+static int fault_1d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ if (omap_obj->pages) {
+ omap_gem_cpu_sync(obj, pgoff);
+ pfn = page_to_pfn(omap_obj->pages[pgoff]);
+ } else {
+ BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+ pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+ }
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+}
+
+/* Special handling for the case of faulting in 2d tiled buffers */
+static int fault_2d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct usergart_entry *entry;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct page *pages[64]; /* XXX is this too much to have on stack? */
+ unsigned long pfn;
+ pgoff_t pgoff, base_pgoff;
+ void __user *vaddr;
+ int i, ret, slots;
+
+ /*
+ * Note the height of the slot is also equal to the number of pages
+ * that need to be mapped in to fill a 4kb wide CPU page. If the slot
+ * height is 64, then 64 pages fill a 4kb wide by 64 row region.
+ */
+ const int n = usergart[fmt].height;
+ const int n_shift = usergart[fmt].height_shift;
+
+ /*
+ * If buffer width in bytes > PAGE_SIZE then the virtual stride is
+ * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
+ * into account in some of the math, so figure out virtual stride
+ * in pages
+ */
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ /*
+ * Actual address we start mapping at is rounded down to previous slot
+ * boundary in the y direction:
+ */
+ base_pgoff = round_down(pgoff, m << n_shift);
+
+ /* figure out buffer width in slots */
+ slots = omap_obj->width >> usergart[fmt].slot_shift;
+
+ vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+
+ entry = &usergart[fmt].entry[usergart[fmt].last];
+
+ /* evict previous buffer using this usergart entry, if any: */
+ if (entry->obj)
+ evict_entry(entry->obj, fmt, entry);
+
+ entry->obj = obj;
+ entry->obj_pgoff = base_pgoff;
+
+ /* now convert base_pgoff to phys offset from virt offset: */
+ base_pgoff = (base_pgoff >> n_shift) * slots;
+
+ /* for wider-than 4k.. figure out which part of the slot-row we want: */
+ if (m > 1) {
+ int off = pgoff % m;
+ entry->obj_pgoff += off;
+ base_pgoff /= m;
+ slots = min(slots - (off << n_shift), n);
+ base_pgoff += off << n_shift;
+ vaddr += off << PAGE_SHIFT;
+ }
+
+ /*
+ * Map in pages. Beyond the valid pixel part of the buffer, we set
+ * pages[i] to NULL to get a dummy page mapped in.. if someone
+ * reads/writes it they will get random/undefined content, but at
+ * least it won't be corrupting whatever other random page used to
+ * be mapped in, or other undefined behavior.
+ */
+ memcpy(pages, &omap_obj->pages[base_pgoff],
+ sizeof(struct page *) * slots);
+ memset(pages + slots, 0,
+ sizeof(struct page *) * (n - slots));
+
+ ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+ if (ret) {
+ dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+ return ret;
+ }
+
+ pfn = entry->paddr >> PAGE_SHIFT;
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ for (i = n; i > 0; i--) {
+ vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+ pfn += usergart[fmt].stride_pfn;
+ vaddr += PAGE_SIZE * m;
+ }
+
+ /* simple round-robin: */
+ usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+
+ return 0;
+}
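To make the stride arithmetic above concrete, a worked example (illustrative numbers only; it assumes a 32-bit tiled buffer 1280 pixels wide and the 64-row slot height used as an example in the comment):

	/*
	 * fmt = TILFMT_32BIT (shift of 2), omap_obj->width = 1280, n = 64:
	 *   bytes per row       = 1280 << 2       = 5120
	 *   m (stride in pages) = 1 + 5120 / 4096 = 2
	 * so each row is padded to two 4kb pages in the virtual layout, and a
	 * single fault maps one 4kb-wide, 64-row region starting at the
	 * previous slot boundary.
	 */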
+
+/**
+ * omap_gem_fault - pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct page **pages;
+ int ret;
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet
+ */
+ mutex_lock(&dev->struct_mutex);
+
+ /* if a shmem backed object, make sure we have pages attached now */
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+
+ /* where should we do corresponding put_pages().. we are mapping
+ * the original page, rather than thru a GART, so we can't rely
+ * on eviction to trigger this. But munmap() or all mappings should
+ * probably trigger put_pages()?
+ */
+
+ if (omap_obj->flags & OMAP_BO_TILED)
+ ret = fault_2d(obj, vma, vmf);
+ else
+ ret = fault_1d(obj, vma, vmf);
+
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+/** We override mainly to fix up some of the vm mapping flags.. */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ return omap_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ if (omap_obj->flags & OMAP_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ } else {
+ /*
+ * We do have some private objects, at least for scanout buffers
+ * on hardware without DMM/TILER. But these are allocated write-
+ * combine
+ */
+ if (WARN_ON(!obj->filp))
+ return -EINVAL;
+
+ /*
+ * Shunt off cached objs to shmem file so they have their own
+ * address_space (so unmap_mapping_range does what we want,
+ * in particular in the case of mmap'd dmabufs)
+ */
+ fput(vma->vm_file);
+ vma->vm_pgoff = 0;
+ vma->vm_file = get_file(obj->filp);
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ return 0;
+}
+
+
+/**
+ * omap_gem_dumb_create - create a dumb buffer
+ * @file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ union omap_gem_size gsize;
+
+ /* in case someone tries to feed us a completely bogus stride: */
+ args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ gsize = (union omap_gem_size){
+ .bytes = args->size,
+ };
+
+ return omap_gem_new_handle(dev, file, gsize,
+ OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+}
+
+/**
+ * omap_gem_dumb_destroy - destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via omap_gem_dumb_create.
+ */
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * omap_gem_dumb_map_offset - buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ * @offset: returned mmap offset
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+fail:
+ return ret;
+}
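The dumb-buffer entry points above are reached through the generic DRM ioctls. A minimal userspace sketch (illustrative only, not part of this patch; error handling omitted):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>
	#include <drm/drm_mode.h>

	/* create a 32bpp dumb buffer on an open omapdrm fd and mmap it */
	static void *create_and_map_dumb(int fd, uint32_t width, uint32_t height)
	{
		struct drm_mode_create_dumb create;
		struct drm_mode_map_dumb map;

		memset(&create, 0, sizeof(create));
		create.width = width;
		create.height = height;
		create.bpp = 32;
		/* handled by omap_gem_dumb_create() above */
		ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

		memset(&map, 0, sizeof(map));
		map.handle = create.handle;
		/* handled by omap_gem_dumb_map_offset() above */
		ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

		/* faults on this mapping end up in omap_gem_fault() */
		return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, map.offset);
	}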
+
+/* Set scrolling position. This allows us to implement fast scrolling
+ * for the console.
+ *
+ * Call only from non-atomic contexts.
+ */
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ int ret = 0;
+
+ if (roll > npages) {
+ dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
+ return -EINVAL;
+ }
+
+ omap_obj->roll = roll;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ /* if we aren't mapped yet, we don't need to do anything */
+ if (omap_obj->block) {
+ struct page **pages;
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+ ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+ if (ret)
+ dev_err(obj->dev->dev, "could not repin: %d\n", ret);
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
+
+/* Sync the buffer for CPU access.. note pages should already be
+ * attached, ie. omap_gem_get_pages()
+ */
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
+ dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ omap_obj->addrs[pgoff] = 0;
+ }
+}
+
+/* sync the buffer for DMA access */
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+ enum dma_data_direction dir)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (is_cached_coherent(obj)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ struct page **pages = omap_obj->pages;
+ bool dirty = false;
+
+ for (i = 0; i < npages; i++) {
+ if (!omap_obj->addrs[i]) {
+ omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dirty = true;
+ }
+ }
+
+ if (dirty) {
+ unmap_mapping_range(obj->filp->f_mapping, 0,
+ omap_gem_mmap_size(obj), 1);
+ }
+ }
+}
+
+/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
+ * already contiguous, remap it to pin in physically contiguous memory.. (ie.
+ * map in TILER)
+ */
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap)
+{
+ struct omap_drm_private *priv = obj->dev->dev_private;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ if (remap && is_shmem(obj) && priv->has_dmm) {
+ if (omap_obj->paddr_cnt == 0) {
+ struct page **pages;
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct tiler_block *block;
+
+ BUG_ON(omap_obj->block);
+
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ block = tiler_reserve_2d(fmt,
+ omap_obj->width,
+ omap_obj->height, 0);
+ } else {
+ block = tiler_reserve_1d(obj->size);
+ }
+
+ if (IS_ERR(block)) {
+ ret = PTR_ERR(block);
+ dev_err(obj->dev->dev,
+ "could not remap: %d (%d)\n", ret, fmt);
+ goto fail;
+ }
+
+ /* TODO: enable async refill.. */
+ ret = tiler_pin(block, pages, npages,
+ omap_obj->roll, true);
+ if (ret) {
+ tiler_release(block);
+ dev_err(obj->dev->dev,
+ "could not pin: %d\n", ret);
+ goto fail;
+ }
+
+ omap_obj->paddr = tiler_ssptr(block);
+ omap_obj->block = block;
+
+ DBG("got paddr: %08x", omap_obj->paddr);
+ }
+
+ omap_obj->paddr_cnt++;
+
+ *paddr = omap_obj->paddr;
+ } else if (omap_obj->flags & OMAP_BO_DMA) {
+ *paddr = omap_obj->paddr;
+ } else {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
+
+/* Release physical address, when DMA is no longer being performed.. this
+ * could potentially unpin and unmap buffers from TILER
+ */
+int omap_gem_put_paddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+ if (omap_obj->paddr_cnt > 0) {
+ omap_obj->paddr_cnt--;
+ if (omap_obj->paddr_cnt == 0) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ goto fail;
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->block = NULL;
+ }
+ }
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
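A minimal sketch of the intended get/put pairing (illustrative only; error handling trimmed):

	/* pin a buffer for DMA, program the hardware, then unpin it */
	static int example_dma_to_buffer(struct drm_gem_object *obj)
	{
		dma_addr_t paddr;
		int ret;

		/* pins the buffer, remapping it through TILER if needed */
		ret = omap_gem_get_paddr(obj, &paddr, true);
		if (ret)
			return ret;

		/* ... program hardware with 'paddr' and wait for completion ... */

		/* drop the pin; the TILER block may be released when unused */
		omap_gem_put_paddr(obj);
		return 0;
	}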
+
+/* Get rotated scanout address (only valid if already pinned), at the
+ * specified orientation and x,y offset from top-left corner of buffer
+ * (only valid for tiled 2d buffers)
+ */
+int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
+ int x, int y, dma_addr_t *paddr)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = -EINVAL;
+
+ mutex_lock(&obj->dev->struct_mutex);
+ if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
+ (omap_obj->flags & OMAP_BO_TILED)) {
+ *paddr = tiler_tsptr(omap_obj->block, orient, x, y);
+ ret = 0;
+ }
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
+int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = -EINVAL;
+ if (omap_obj->flags & OMAP_BO_TILED)
+ ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
+ return ret;
+}
+
+/* acquire pages when needed (for example, for DMA where a physically
+ * contiguous buffer is not required)
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ if (is_shmem(obj) && !omap_obj->pages) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ dev_err(obj->dev->dev, "could not attach pages\n");
+ return ret;
+ }
+ }
+
+ /* TODO: even phys-contig.. we should have a list of pages? */
+ *pages = omap_obj->pages;
+
+ return 0;
+}
+
+/* if !remap, and we don't have pages backing, then fail, rather than
+ * increasing the pin count (which we don't really do yet anyways,
+ * because we don't support swapping pages back out). And 'remap'
+ * might not be quite the right name, but I wanted to keep it working
+ * similarly to omap_gem_get_paddr(). Note though that mutex is not
+ * acquired if !remap (because this can be called in atomic ctxt),
+ * but probably omap_gem_get_paddr() should be changed to work in the
+ * same way. If !remap, a matching omap_gem_put_pages() call is not
+ * required (and should not be made).
+ */
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+ bool remap)
+{
+ int ret;
+ if (!remap) {
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ if (!omap_obj->pages)
+ return -ENOMEM;
+ *pages = omap_obj->pages;
+ return 0;
+ }
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = get_pages(obj, pages);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* release pages when DMA no longer being performed */
+int omap_gem_put_pages(struct drm_gem_object *obj)
+{
+ /* do something here if we dynamically attach/detach pages.. at
+ * least they would no longer need to be pinned if everyone has
+ * released the pages..
+ */
+ return 0;
+}
+
+/* Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev. This should be called with struct_mutex
+ * held.
+ */
+void *omap_gem_vaddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ if (!omap_obj->vaddr) {
+ struct page **pages;
+ int ret = get_pages(obj, &pages);
+ if (ret)
+ return ERR_PTR(ret);
+ omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ return omap_obj->vaddr;
+}
+
+#ifdef CONFIG_PM
+/* re-pin objects in DMM in resume path: */
+int omap_gem_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct omap_drm_private *priv = drm_dev->dev_private;
+ struct omap_gem_object *omap_obj;
+ int ret = 0;
+
+ list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
+ if (omap_obj->block) {
+ struct drm_gem_object *obj = &omap_obj->base;
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ WARN_ON(!omap_obj->pages); /* this can't happen */
+ ret = tiler_pin(omap_obj->block,
+ omap_obj->pages, npages,
+ omap_obj->roll, true);
+ if (ret) {
+ dev_err(dev, "could not repin: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ uint64_t off = 0;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (obj->map_list.map)
+ off = (uint64_t)obj->map_list.hash.key;
+
+ seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+ off, omap_obj->paddr, omap_obj->paddr_cnt,
+ omap_obj->vaddr, omap_obj->roll);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
+ if (omap_obj->block) {
+ struct tcm_area *area = &omap_obj->block->area;
+ seq_printf(m, " (%dx%d, %dx%d)",
+ area->p0.x, area->p0.y,
+ area->p1.x, area->p1.y);
+ }
+ } else {
+ seq_printf(m, " %d", obj->size);
+ }
+
+ seq_printf(m, "\n");
+}
+
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+ struct omap_gem_object *omap_obj;
+ int count = 0;
+ size_t size = 0;
+
+ list_for_each_entry(omap_obj, list, mm_list) {
+ struct drm_gem_object *obj = &omap_obj->base;
+ seq_printf(m, " ");
+ omap_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+/* Buffer Synchronization:
+ */
+
+struct omap_gem_sync_waiter {
+ struct list_head list;
+ struct omap_gem_object *omap_obj;
+ enum omap_gem_op op;
+ uint32_t read_target, write_target;
+ /* notify called w/ sync_lock held */
+ void (*notify)(void *arg);
+ void *arg;
+};
+
+/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
+ * the read and/or write target count is achieved which can call a user
+ * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
+ * cpu access), etc.
+ */
+static LIST_HEAD(waiters);
+
+static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
+{
+ struct omap_gem_object *omap_obj = waiter->omap_obj;
+ if ((waiter->op & OMAP_GEM_READ) &&
+ (omap_obj->sync->read_complete < waiter->read_target))
+ return true;
+ if ((waiter->op & OMAP_GEM_WRITE) &&
+ (omap_obj->sync->write_complete < waiter->write_target))
+ return true;
+ return false;
+}
+
+/* macro for sync debug.. */
+#define SYNCDBG 0
+#define SYNC(fmt, ...) do { if (SYNCDBG) \
+ printk(KERN_ERR "%s:%d: "fmt"\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
+
+
+static void sync_op_update(void)
+{
+ struct omap_gem_sync_waiter *waiter, *n;
+ list_for_each_entry_safe(waiter, n, &waiters, list) {
+ if (!is_waiting(waiter)) {
+ list_del(&waiter->list);
+ SYNC("notify: %p", waiter);
+ waiter->notify(waiter->arg);
+ kfree(waiter);
+ }
+ }
+}
+
+static inline int sync_op(struct drm_gem_object *obj,
+ enum omap_gem_op op, bool start)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if (!omap_obj->sync) {
+ omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ if (!omap_obj->sync) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ }
+
+ if (start) {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_pending++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_pending++;
+ } else {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_complete++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_complete++;
+ sync_op_update();
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+
+ return ret;
+}
+
+/* it is a bit lame to handle updates in this sort of polling way, but
+ * in case of PVR, the GPU can directly update read/write complete
+ * values, and not really tell us which ones it updated.. this also
+ * means that sync_lock is not quite sufficient. So we'll need to
+ * do something a bit better when it comes time to add support for
+ * separate 2d hw..
+ */
+void omap_gem_op_update(void)
+{
+ spin_lock(&sync_lock);
+ sync_op_update();
+ spin_unlock(&sync_lock);
+}
+
+/* mark the start of read and/or write operation */
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, true);
+}
+
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, false);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_event);
+
+static void sync_notify(void *arg)
+{
+ struct task_struct **waiter_task = arg;
+ *waiter_task = NULL;
+ wake_up_all(&sync_event);
+}
+
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+ if (omap_obj->sync) {
+ struct task_struct *waiter_task = current;
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_KERNEL);
+
+ if (!waiter)
+ return -ENOMEM;
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = sync_notify;
+ waiter->arg = &waiter_task;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ ret = wait_event_interruptible(sync_event,
+ (waiter_task == NULL));
+ spin_lock(&sync_lock);
+ if (waiter_task) {
+ SYNC("interrupted: %p", waiter);
+ /* we were interrupted */
+ list_del(&waiter->list);
+ waiter_task = NULL;
+ } else {
+ /* freed in sync_op_update() */
+ waiter = NULL;
+ }
+ }
+ spin_unlock(&sync_lock);
+
+ kfree(waiter);
+ }
+ return ret;
+}
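A short sketch of how the op_start/op_finish/op_sync API is meant to bracket buffer access (illustrative only):

	/* a hardware writer marks its access window... */
	static void example_hw_write(struct drm_gem_object *obj)
	{
		omap_gem_op_start(obj, OMAP_GEM_WRITE);	/* write now pending */
		/* ... kick the hardware and let it run to completion ... */
		omap_gem_op_finish(obj, OMAP_GEM_WRITE);	/* write complete */
	}

	/* ...and a CPU reader waits for outstanding writes before touching data */
	static void example_cpu_read(struct drm_gem_object *obj)
	{
		omap_gem_op_sync(obj, OMAP_GEM_WRITE);
		/* safe to read via omap_gem_vaddr()/pages from here on */
	}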
+
+/* call fxn(arg), either synchronously or asynchronously if the op
+ * is currently blocked.. fxn() can be called from any context
+ *
+ * (TODO for now fxn is called back from whichever context calls
+ * omap_gem_op_update().. but this could be better defined later
+ * if needed)
+ *
+ * TODO more code in common w/ _sync()..
+ */
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ if (omap_obj->sync) {
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_ATOMIC);
+
+ if (!waiter)
+ return -ENOMEM;
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = fxn;
+ waiter->arg = arg;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ return 0;
+ }
+
+ spin_unlock(&sync_lock);
+ }
+
+ /* no waiting.. */
+ fxn(arg);
+
+ return 0;
+}
+
+/* special API so PVR can update the buffer to use a sync-object allocated
+ * from its sync-obj heap. Only used for a newly allocated (from PVR's
+ * perspective) sync-object, so we overwrite the new syncobj w/ values
+ * from the already allocated syncobj (if there is one)
+ */
+int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
+ /* clearing a previously set syncobj */
+ syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
+ GFP_ATOMIC);
+ if (!syncobj) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+ /* replacing an existing syncobj */
+ if (omap_obj->sync) {
+ memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+ kfree(omap_obj->sync);
+ }
+ omap_obj->flags |= OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+ return ret;
+}
+
+int omap_gem_init_object(struct drm_gem_object *obj)
+{
+ return -EINVAL; /* unused */
+}
+
+/* don't call directly.. called from GEM core when it is time to actually
+ * free the object..
+ */
+void omap_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ evict(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ list_del(&omap_obj->mm_list);
+
+ if (obj->map_list.map)
+ drm_gem_free_mmap_offset(obj);
+
+ /* this means the object is still pinned.. which really should
+ * not happen. I think..
+ */
+ WARN_ON(omap_obj->paddr_cnt > 0);
+
+ /* don't free externally allocated backing memory */
+ if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
+ if (omap_obj->pages)
+ omap_gem_detach_pages(obj);
+
+ if (!is_shmem(obj)) {
+ dma_free_writecombine(dev->dev, obj->size,
+ omap_obj->vaddr, omap_obj->paddr);
+ } else if (omap_obj->vaddr) {
+ vunmap(omap_obj->vaddr);
+ }
+ }
+
+ /* don't free externally allocated syncobj */
+ if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
+ kfree(omap_obj->sync);
+
+ drm_gem_object_release(obj);
+
+ kfree(obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = omap_gem_new(dev, gsize, flags);
+ if (!obj)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, obj, handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
+ return ret;
+ }
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
+}
+
+/* GEM buffer object constructor */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_gem_object *omap_obj;
+ struct drm_gem_object *obj = NULL;
+ size_t size;
+ int ret;
+
+ if (flags & OMAP_BO_TILED) {
+ if (!usergart) {
+ dev_err(dev->dev, "Tiled buffers require DMM\n");
+ goto fail;
+ }
+
+ /* tiled buffers are always backed by shmem pages.. when they are
+ * scanned out, they are remapped into DMM/TILER
+ */
+ flags &= ~OMAP_BO_SCANOUT;
+
+ /* currently don't allow cached buffers.. there is some caching
+ * stuff that needs to be handled better
+ */
+ flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
+ flags |= OMAP_BO_WC;
+
+ /* align dimensions to slot boundaries... */
+ tiler_align(gem2fmt(flags),
+ &gsize.tiled.width, &gsize.tiled.height);
+
+ /* ...and calculate size based on aligned dimensions */
+ size = tiler_size(gem2fmt(flags),
+ gsize.tiled.width, gsize.tiled.height);
+ } else {
+ size = PAGE_ALIGN(gsize.bytes);
+ }
+
+ omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
+ if (!omap_obj) {
+ dev_err(dev->dev, "could not allocate GEM object\n");
+ goto fail;
+ }
+
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+
+ obj = &omap_obj->base;
+
+ if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+ /* attempt to allocate contiguous memory if we don't
+ * have DMM for remapping discontiguous buffers
+ */
+ omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
+ &omap_obj->paddr, GFP_KERNEL);
+ if (omap_obj->vaddr)
+ flags |= OMAP_BO_DMA;
+
+ }
+
+ omap_obj->flags = flags;
+
+ if (flags & OMAP_BO_TILED) {
+ omap_obj->width = gsize.tiled.width;
+ omap_obj->height = gsize.tiled.height;
+ }
+
+ if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+ ret = drm_gem_private_object_init(dev, obj, size);
+ else
+ ret = drm_gem_object_init(dev, obj, size);
+
+ if (ret)
+ goto fail;
+
+ return obj;
+
+fail:
+ if (obj)
+ omap_gem_free_object(obj);
+
+ return NULL;
+}
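The constructor supports two allocation flavours, selected by the flags and the union omap_gem_size; a hedged sketch (illustrative only, flag names taken from the omapdrm uapi header):

	/* allocate a linear scanout buffer and a 2d tiled buffer */
	static void example_alloc(struct drm_device *dev)
	{
		union omap_gem_size linear = { .bytes = 1024 * 1024 };
		union omap_gem_size tiled = {
			.tiled = { .width = 1280, .height = 720 },
		};

		/* page-backed (or contiguous when there is no DMM) scanout buffer */
		struct drm_gem_object *fb_bo =
			omap_gem_new(dev, linear, OMAP_BO_SCANOUT | OMAP_BO_WC);

		/* 16bpp buffer that lives in the TILER 2d container when pinned */
		struct drm_gem_object *tiled_bo =
			omap_gem_new(dev, tiled, OMAP_BO_TILED_16 | OMAP_BO_WC);

		(void)fb_bo;
		(void)tiled_bo;
	}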
+
+/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+void omap_gem_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ const enum tiler_fmt fmts[] = {
+ TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
+ };
+ int i, j;
+
+ if (!dmm_is_available()) {
+ /* DMM only supported on OMAP4 and later, so this isn't fatal */
+ dev_warn(dev->dev, "DMM not available, disable DMM support\n");
+ return;
+ }
+
+ usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
+ if (!usergart) {
+ dev_warn(dev->dev, "could not allocate usergart\n");
+ return;
+ }
+
+ /* reserve 4k aligned/wide regions for userspace mappings: */
+ for (i = 0; i < ARRAY_SIZE(fmts); i++) {
+ uint16_t h = 1, w = PAGE_SIZE >> i;
+ tiler_align(fmts[i], &w, &h);
+ /* note: since each region is one 4kb page wide, and the minimum
+ * number of rows high, the height ends up being the same as the
+ * # of pages in the region
+ */
+ usergart[i].height = h;
+ usergart[i].height_shift = ilog2(h);
+ usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
+ usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
+ for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
+ struct usergart_entry *entry = &usergart[i].entry[j];
+ struct tiler_block *block =
+ tiler_reserve_2d(fmts[i], w, h,
+ PAGE_SIZE);
+ if (IS_ERR(block)) {
+ dev_err(dev->dev,
+ "reserve failed: %d, %d, %ld\n",
+ i, j, PTR_ERR(block));
+ return;
+ }
+ entry->paddr = tiler_ssptr(block);
+ entry->block = block;
+
+ DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+ entry->paddr,
+ usergart[i].stride_pfn << PAGE_SHIFT);
+ }
+ }
+
+ priv->has_dmm = true;
+}
+
+void omap_gem_deinit(struct drm_device *dev)
+{
+ /* I believe we can rely on there being no more outstanding GEM
+ * objects which could depend on usergart/dmm at this point.
+ */
+ kfree(usergart);
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
new file mode 100644
index 00000000000..ac74d1bc67b
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -0,0 +1,225 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *omap_gem_map_dma_buf(
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attachment->dmabuf->priv;
+ struct sg_table *sg;
+ dma_addr_t paddr;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ /* camera, etc, need physically contiguous buffers.. but we need a
+ * better way to know this..
+ */
+ ret = omap_gem_get_paddr(obj, &paddr, true);
+ if (ret)
+ goto out;
+
+ ret = sg_alloc_table(sg, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_init_table(sg->sgl, 1);
+ sg_dma_len(sg->sgl) = obj->size;
+ sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
+ sg_dma_address(sg->sgl) = paddr;
+
+ /* this should be after _get_paddr() to ensure we have pages attached */
+ omap_gem_dma_sync(obj, dir);
+
+ return sg;
+out:
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+
+static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attachment->dmabuf->priv;
+ omap_gem_put_paddr(obj);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void omap_gem_dmabuf_release(struct dma_buf *buffer)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ /* release reference that was taken when dmabuf was exported
+ * in omap_gem_prime_set()..
+ */
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+
+static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
+ size_t start, size_t len, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ /* TODO we would need to pin at least part of the buffer to
+ * get a de-tiled view. For now just reject it.
+ */
+ return -ENOMEM;
+ }
+ /* make sure we have the pages: */
+ return omap_gem_get_pages(obj, &pages, true);
+}
+
+static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
+ size_t start, size_t len, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ omap_gem_put_pages(obj);
+}
+
+
+static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
+ unsigned long page_num)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ omap_gem_get_pages(obj, &pages, false);
+ omap_gem_cpu_sync(obj, page_num);
+ return kmap_atomic(pages[page_num]);
+}
+
+static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
+ unsigned long page_num, void *addr)
+{
+ kunmap_atomic(addr);
+}
+
+static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
+ unsigned long page_num)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ omap_gem_get_pages(obj, &pages, false);
+ omap_gem_cpu_sync(obj, page_num);
+ return kmap(pages[page_num]);
+}
+
+static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
+ unsigned long page_num, void *addr)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ omap_gem_get_pages(obj, &pages, false);
+ kunmap(pages[page_num]);
+}
+
+/*
+ * TODO maybe we can split up drm_gem_mmap to avoid duplicating
+ * some here.. or at least have a drm_dmabuf_mmap helper.
+ */
+static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ int ret = 0;
+
+ if (WARN_ON(!obj->filp))
+ return -EINVAL;
+
+ /* Check for valid size. */
+ if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!obj->dev->driver->gem_vm_ops) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = obj->dev->driver->gem_vm_ops;
+ vma->vm_private_data = obj;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ /* Take a ref for this mapping of the object, so that the fault
+ * handler can dereference the mmap offset's pointer to the object.
+ * This reference is cleaned up by the corresponding vm_close
+ * (which should happen whether the vma was created by this call, or
+ * by a vm_open due to mremap or partial unmap or whatever).
+ */
+ vma->vm_ops->open(vma);
+
+out_unlock:
+ if (ret)
+ return ret;
+
+ return omap_gem_mmap_obj(obj, vma);
+}
+
+struct dma_buf_ops omap_dmabuf_ops = {
+ .map_dma_buf = omap_gem_map_dma_buf,
+ .unmap_dma_buf = omap_gem_unmap_dma_buf,
+ .release = omap_gem_dmabuf_release,
+ .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
+ .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
+ .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
+ .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+ .kmap = omap_gem_dmabuf_kmap,
+ .kunmap = omap_gem_dmabuf_kunmap,
+ .mmap = omap_gem_dmabuf_mmap,
+};
+
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buffer)
+{
+ struct drm_gem_object *obj;
+
+ /* is this one of own objects? */
+ if (buffer->ops == &omap_dmabuf_ops) {
+ obj = buffer->priv;
+ /* is it from our device? */
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ dma_buf_put(buffer);
+ return obj;
+ }
+ }
+
+ /*
+ * TODO add support for importing buffers from other devices..
+ * for now we don't need this but would be nice to add eventually
+ */
+ return ERR_PTR(-EINVAL);
+}
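From userspace, buffers reach this dma-buf code through the generic PRIME ioctls; a hedged sketch (illustrative only, error handling omitted):

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	/* export a GEM handle as a dma-buf fd (ends up in omap_gem_prime_export()) */
	static int export_gem_handle(int drm_fd, uint32_t handle)
	{
		struct drm_prime_handle prime;

		memset(&prime, 0, sizeof(prime));
		prime.handle = handle;
		prime.flags = DRM_CLOEXEC;

		ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
		return prime.fd;	/* mmap() on this fd hits omap_gem_dmabuf_mmap() */
	}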
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
new file mode 100644
index 00000000000..e4a66a35fc6
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
@@ -0,0 +1,169 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* temporary copy of drm_gem_{get,put}_pages() until the
+ * "drm/gem: add functions to get/put pages" patch is merged..
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = obj->filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
+
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ */
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+
+int
+_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret = 0;
+
+ /* Set the object up for mmap'ing */
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = size;
+ map->handle = obj;
+
+ /* Get a DRM GEM mmap offset allocated... */
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ size / PAGE_SIZE, 0, 0);
+
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ ret = -ENOSPC;
+ goto out_free_list;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ DRM_ERROR("failed to add to map hash\n");
+ goto out_free_mm;
+ }
+
+ return 0;
+
+out_free_mm:
+ drm_mm_put_block(list->file_offset_node);
+out_free_list:
+ kfree(list->map);
+ list->map = NULL;
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
new file mode 100644
index 00000000000..e01303ee00c
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -0,0 +1,322 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_irq.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void omap_irq_error_handler(struct omap_drm_irq *irq,
+ uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+/* call with list_lock and dispc runtime held */
+static void omap_irq_update(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *irq;
+ uint32_t irqmask = priv->vblank_mask;
+
+ BUG_ON(!spin_is_locked(&list_lock));
+
+ list_for_each_entry(irq, &priv->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ DBG("irqmask=%08x", irqmask);
+
+ dispc_write_irqenable(irqmask);
+ dispc_read_irqenable(); /* flush posted write */
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!WARN_ON(irq->registered)) {
+ irq->registered = true;
+ list_add(&irq->node, &priv->irq_list);
+ omap_irq_update(dev);
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ unsigned long flags;
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!WARN_ON(!irq->registered)) {
+ irq->registered = false;
+ list_del(&irq->node);
+ omap_irq_update(dev);
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+struct omap_irq_wait {
+ struct omap_drm_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_irq_wait *wait =
+ container_of(irq, struct omap_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+ uint32_t irqmask, int count)
+{
+ struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ wait->irq.irq = wait_irq;
+ wait->irq.irqmask = irqmask;
+ wait->count = count;
+ omap_irq_register(dev, &wait->irq);
+ return wait;
+}
+
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout)
+{
+ int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
+ omap_irq_unregister(dev, &wait->irq);
+ kfree(wait);
+ if (ret == 0)
+ return -1;
+ return 0;
+}
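A short sketch of how the wait helper pair above is used (illustrative only; the mask and count are just examples):

	/* wait up to 100ms for two occurrences of the given dispc interrupt */
	static int example_wait_for_irq(struct drm_device *dev, uint32_t irqmask)
	{
		struct omap_irq_wait *wait;

		wait = omap_irq_wait_init(dev, irqmask, 2);	/* registers the handler */
		/* ... trigger whatever generates the interrupt ... */
		return omap_irq_wait(dev, wait, msecs_to_jiffies(100));	/* frees 'wait' */
	}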
+
+/**
+ * omap_irq_enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ DBG("dev=%p, crtc=%d", dev, crtc);
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+ priv->vblank_mask |= pipe2vbl(crtc);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+
+ return 0;
+}
+
+/**
+ * omap_irq_disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to disable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ DBG("dev=%p, crtc=%d", dev, crtc);
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+ priv->vblank_mask &= ~pipe2vbl(crtc);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *handler, *n;
+ unsigned long flags;
+ unsigned int id;
+ u32 irqstatus;
+
+ irqstatus = dispc_read_irqstatus();
+ dispc_clear_irqstatus(irqstatus);
+ dispc_read_irqstatus(); /* flush posted write */
+
+ VERB("irqs: %08x", irqstatus);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (irqstatus & pipe2vbl(id))
+ drm_handle_vblank(dev, id);
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
+ if (handler->irqmask & irqstatus) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & irqstatus);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void omap_irq_preinstall(struct drm_device *dev)
+{
+ DBG("dev=%p", dev);
+ dispc_runtime_get();
+ dispc_clear_irqstatus(0xffffffff);
+ dispc_runtime_put();
+}
+
+int omap_irq_postinstall(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *error_handler = &priv->error_handler;
+
+ DBG("dev=%p", dev);
+
+ INIT_LIST_HEAD(&priv->irq_list);
+
+ error_handler->irq = omap_irq_error_handler;
+ error_handler->irqmask = DISPC_IRQ_OCP_ERR;
+
+	/* For now ignore DISPC_IRQ_SYNC_LOST_DIGIT; most likely we just
+	 * need to ignore it while enabling tv-out.
+	 */
+ error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+
+ omap_irq_register(dev, error_handler);
+
+ return 0;
+}
+
+void omap_irq_uninstall(struct drm_device *dev)
+{
+ DBG("dev=%p", dev);
+	/* TODO: we probably need to call drm_irq_uninstall() somewhere too */
+}
+
+/*
+ * We need a special version, instead of just using drm_irq_install(),
+ * because we need to register the irq via omapdss. Once omapdss and
+ * omapdrm are merged together we can assign the dispc hwmod data to
+ * ourselves and drop these and just use drm_irq_{install,uninstall}()
+ */
+
+int omap_drm_irq_install(struct drm_device *dev)
+{
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (dev->irq_enabled) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+ dev->irq_enabled = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Before installing handler */
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+
+ ret = dispc_request_irq(dev->driver->irq_handler, dev);
+
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* After installing handler */
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ dispc_free_irq(dev);
+ }
+
+ return ret;
+}
+
+int omap_drm_irq_uninstall(struct drm_device *dev)
+{
+ unsigned long irqflags;
+ int irq_enabled, i;
+
+ mutex_lock(&dev->struct_mutex);
+ irq_enabled = dev->irq_enabled;
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * Wake up any waiters so they don't hang.
+ */
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ DRM_WAKEUP(&dev->vbl_queue[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+
+ if (!irq_enabled)
+ return -EINVAL;
+
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+ dispc_free_irq(dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
new file mode 100644
index 00000000000..dd68d14ce61
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -0,0 +1,450 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_plane.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kfifo.h>
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+/* Some hackery because omapdss has an 'enum omap_plane' (which would be
+ * better named omap_plane_id), and the compiler is unhappy about having
+ * both a 'struct omap_plane' and an 'enum omap_plane' in scope.
+ */
+#define omap_plane _omap_plane
+
+/*
+ * plane funcs
+ */
+
+struct callback {
+ void (*fxn)(void *);
+ void *arg;
+};
+
+#define to_omap_plane(x) container_of(x, struct omap_plane, base)
+
+struct omap_plane {
+ struct drm_plane base;
+ int id; /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
+ const char *name;
+ struct omap_overlay_info info;
+ struct omap_drm_apply apply;
+
+ /* position/orientation of scanout within the fb: */
+ struct omap_drm_window win;
+ bool enabled;
+
+ /* last fb that we pinned: */
+ struct drm_framebuffer *pinned_fb;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ struct omap_drm_irq error_irq;
+
+ /* set of bo's pending unpin until next post_apply() */
+ DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+
+ // XXX maybe get rid of this and handle vblank in crtc too?
+ struct callback apply_done_cb;
+};
+
+static void unpin(void *arg, struct drm_gem_object *bo)
+{
+ struct drm_plane *plane = arg;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ if (kfifo_put(&omap_plane->unpin_fifo,
+ (const struct drm_gem_object **)&bo)) {
+ /* also hold a ref so it isn't free'd while pinned */
+ drm_gem_object_reference(bo);
+ } else {
+ dev_err(plane->dev->dev, "unpin fifo full!\n");
+ omap_gem_put_paddr(bo);
+ }
+}
+
+/* update which fb (if any) is pinned for scanout */
+static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
+
+ if (pinned_fb != fb) {
+ int ret;
+
+ DBG("%p -> %p", pinned_fb, fb);
+
+ if (fb)
+ drm_framebuffer_reference(fb);
+
+ ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+
+ if (pinned_fb)
+ drm_framebuffer_unreference(pinned_fb);
+
+ if (ret) {
+ dev_err(plane->dev->dev, "could not swap %p -> %p\n",
+ omap_plane->pinned_fb, fb);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ omap_plane->pinned_fb = NULL;
+ return ret;
+ }
+
+ omap_plane->pinned_fb = fb;
+ }
+
+ return 0;
+}
+
+static void omap_plane_pre_apply(struct omap_drm_apply *apply)
+{
+ struct omap_plane *omap_plane =
+ container_of(apply, struct omap_plane, apply);
+ struct omap_drm_window *win = &omap_plane->win;
+ struct drm_plane *plane = &omap_plane->base;
+ struct drm_device *dev = plane->dev;
+ struct omap_overlay_info *info = &omap_plane->info;
+ struct drm_crtc *crtc = plane->crtc;
+ enum omap_channel channel;
+ bool enabled = omap_plane->enabled && crtc;
+ bool ilace, replication;
+ int ret;
+
+ DBG("%s, enabled=%d", omap_plane->name, enabled);
+
+ /* if fb has changed, pin new fb: */
+ update_pin(plane, enabled ? plane->fb : NULL);
+
+ if (!enabled) {
+ dispc_ovl_enable(omap_plane->id, false);
+ return;
+ }
+
+ channel = omap_crtc_channel(crtc);
+
+ /* update scanout: */
+ omap_framebuffer_update_scanout(plane->fb, win, info);
+
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+ info->out_width, info->out_height,
+ info->screen_width);
+ DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+ info->paddr, info->p_uv_addr);
+
+ /* TODO: */
+ ilace = false;
+ replication = false;
+
+ /* and finally, update omapdss: */
+ ret = dispc_ovl_setup(omap_plane->id, info,
+ replication, omap_crtc_timings(crtc), false);
+ if (ret) {
+ dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
+ return;
+ }
+
+ dispc_ovl_enable(omap_plane->id, true);
+ dispc_ovl_set_channel_out(omap_plane->id, channel);
+}
+
+static void omap_plane_post_apply(struct omap_drm_apply *apply)
+{
+ struct omap_plane *omap_plane =
+ container_of(apply, struct omap_plane, apply);
+ struct drm_plane *plane = &omap_plane->base;
+ struct omap_overlay_info *info = &omap_plane->info;
+ struct drm_gem_object *bo = NULL;
+ struct callback cb;
+
+ cb = omap_plane->apply_done_cb;
+ omap_plane->apply_done_cb.fxn = NULL;
+
+ while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
+ omap_gem_put_paddr(bo);
+ drm_gem_object_unreference_unlocked(bo);
+ }
+
+ if (cb.fxn)
+ cb.fxn(cb.arg);
+
+ if (omap_plane->enabled) {
+ omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+ info->out_width, info->out_height);
+ }
+}
+
+static int apply(struct drm_plane *plane)
+{
+ if (plane->crtc) {
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ return omap_crtc_apply(plane->crtc, &omap_plane->apply);
+ }
+ return 0;
+}
+
+int omap_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ void (*fxn)(void *), void *arg)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_drm_window *win = &omap_plane->win;
+
+ win->crtc_x = crtc_x;
+ win->crtc_y = crtc_y;
+ win->crtc_w = crtc_w;
+ win->crtc_h = crtc_h;
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ win->src_x = src_x >> 16;
+ win->src_y = src_y >> 16;
+ win->src_w = src_w >> 16;
+ win->src_h = src_h >> 16;
+
+ if (fxn) {
+ /* omap_crtc should ensure that a new page flip
+ * isn't permitted while there is one pending:
+ */
+ BUG_ON(omap_plane->apply_done_cb.fxn);
+
+ omap_plane->apply_done_cb.fxn = fxn;
+ omap_plane->apply_done_cb.arg = arg;
+ }
+
+ plane->fb = fb;
+ plane->crtc = crtc;
+
+ return apply(plane);
+}
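
Because the source coordinates arrive in Q16 (16.16) fixed point, callers pass them shifted left by 16. A hypothetical full-screen scanout of a 1920x1080 framebuffer (values chosen only for illustration) might look like:

omap_plane_mode_set(plane, crtc, fb,
		0, 0, 1920, 1080,		/* crtc window, in pixels */
		0 << 16, 0 << 16,		/* src_x, src_y in Q16 */
		1920 << 16, 1080 << 16,		/* src_w, src_h in Q16 */
		NULL, NULL);			/* no flip-done callback */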
+
+static int omap_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ omap_plane->enabled = true;
+ return omap_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h,
+ NULL, NULL);
+}
+
+static int omap_plane_disable(struct drm_plane *plane)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ omap_plane->win.rotation = BIT(DRM_ROTATE_0);
+ return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_plane_destroy(struct drm_plane *plane)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ DBG("%s", omap_plane->name);
+
+ omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+
+ omap_plane_disable(plane);
+ drm_plane_cleanup(plane);
+
+ WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
+ kfifo_free(&omap_plane->unpin_fifo);
+
+ kfree(omap_plane);
+}
+
+int omap_plane_dpms(struct drm_plane *plane, int mode)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+ int ret = 0;
+
+ if (enabled != omap_plane->enabled) {
+ omap_plane->enabled = enabled;
+ ret = apply(plane);
+ }
+
+ return ret;
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void omap_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ struct drm_device *dev = plane->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_property *prop;
+
+ if (priv->has_dmm) {
+ prop = priv->rotation_prop;
+ if (!prop) {
+ const struct drm_prop_enum_list props[] = {
+ { DRM_ROTATE_0, "rotate-0" },
+ { DRM_ROTATE_90, "rotate-90" },
+ { DRM_ROTATE_180, "rotate-180" },
+ { DRM_ROTATE_270, "rotate-270" },
+ { DRM_REFLECT_X, "reflect-x" },
+ { DRM_REFLECT_Y, "reflect-y" },
+ };
+ prop = drm_property_create_bitmask(dev, 0, "rotation",
+ props, ARRAY_SIZE(props));
+ if (prop == NULL)
+ return;
+ priv->rotation_prop = prop;
+ }
+ drm_object_attach_property(obj, prop, 0);
+ }
+
+ prop = priv->zorder_prop;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
+ if (prop == NULL)
+ return;
+ priv->zorder_prop = prop;
+ }
+ drm_object_attach_property(obj, prop, 0);
+}
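
Because the helper takes a generic drm_mode_object rather than a plane, a CRTC can attach the same rotation/zorder properties to itself. A hypothetical call site in a CRTC init path (omap_crtc and crtc are assumed local names) could be:

/* attach the shared properties to the CRTC's mode object instead: */
omap_plane_install_properties(omap_crtc->plane, &crtc->base);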
+
+int omap_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ int ret = -EINVAL;
+
+ if (property == priv->rotation_prop) {
+ DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
+ omap_plane->win.rotation = val;
+ ret = apply(plane);
+ } else if (property == priv->zorder_prop) {
+ DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
+ omap_plane->info.zorder = val;
+ ret = apply(plane);
+ }
+
+ return ret;
+}
+
+static const struct drm_plane_funcs omap_plane_funcs = {
+ .update_plane = omap_plane_update,
+ .disable_plane = omap_plane_disable,
+ .destroy = omap_plane_destroy,
+ .set_property = omap_plane_set_property,
+};
+
+static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_plane *omap_plane =
+ container_of(irq, struct omap_plane, error_irq);
+ DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+}
+
+static const char *plane_names[] = {
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+static const uint32_t error_irqs[] = {
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
+/* initialize plane */
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+ int id, bool private_plane)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane = NULL;
+ struct omap_plane *omap_plane;
+ struct omap_overlay_info *info;
+ int ret;
+
+ DBG("%s: priv=%d", plane_names[id], private_plane);
+
+ omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
+ if (!omap_plane) {
+ dev_err(dev->dev, "could not allocate plane\n");
+ goto fail;
+ }
+
+ ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate unpin FIFO\n");
+ goto fail;
+ }
+
+ omap_plane->nformats = omap_framebuffer_get_formats(
+ omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
+ dss_feat_get_supported_color_modes(id));
+ omap_plane->id = id;
+ omap_plane->name = plane_names[id];
+
+ plane = &omap_plane->base;
+
+ omap_plane->apply.pre_apply = omap_plane_pre_apply;
+ omap_plane->apply.post_apply = omap_plane_post_apply;
+
+ omap_plane->error_irq.irqmask = error_irqs[id];
+ omap_plane->error_irq.irq = omap_plane_error_irq;
+ omap_irq_register(dev, &omap_plane->error_irq);
+
+ drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
+ omap_plane->formats, omap_plane->nformats, private_plane);
+
+ omap_plane_install_properties(plane, &plane->base);
+
+ /* get our starting configuration, set defaults for parameters
+ * we don't currently use, etc:
+ */
+ info = &omap_plane->info;
+ info->rotation_type = OMAP_DSS_ROT_DMA;
+ info->rotation = OMAP_DSS_ROT_0;
+ info->global_alpha = 0xff;
+ info->mirror = 0;
+
+ /* Set defaults depending on whether we are a CRTC or overlay
+ * layer.
+ * TODO add ioctl to give userspace an API to change this.. this
+ * will come in a subsequent patch.
+ */
+ if (private_plane)
+ omap_plane->info.zorder = 0;
+ else
+ omap_plane->info.zorder = id;
+
+ return plane;
+
+fail:
+ if (plane)
+ omap_plane_destroy(plane);
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
new file mode 100644
index 00000000000..efb60951054
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -0,0 +1,703 @@
+/*
+ * tcm-sita.c
+ *
+ * SImple Tiler Allocator (SiTA): 2D and 1D allocation (reservation) algorithm
+ *
+ * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "tcm-sita.h"
+
+#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
+
+/* Individual selection criteria for different scan areas */
+static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
+static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
+
+/*********************************************
+ * TCM API - Sita Implementation
+ *********************************************/
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area);
+static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
+static void sita_deinit(struct tcm *tcm);
+
+/*********************************************
+ * Main Scanner functions
+ *********************************************/
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area);
+
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area);
+
+/*********************************************
+ * Support Infrastructure Methods
+ *********************************************/
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
+
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best);
+
+static void get_nearness_factor(struct tcm_area *field,
+ struct tcm_area *candidate,
+ struct nearness_factor *nf);
+
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat);
+
+static void fill_area(struct tcm *tcm,
+ struct tcm_area *area, struct tcm_area *parent);
+
+
+/*********************************************/
+
+/*********************************************
+ * Utility Methods
+ *********************************************/
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
+{
+ struct tcm *tcm;
+ struct sita_pvt *pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ if (width == 0 || height == 0)
+ return NULL;
+
+ tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (!tcm || !pvt)
+ goto error;
+
+ memset(tcm, 0, sizeof(*tcm));
+ memset(pvt, 0, sizeof(*pvt));
+
+ /* Updating the pointers to SiTA implementation APIs */
+ tcm->height = height;
+ tcm->width = width;
+ tcm->reserve_2d = sita_reserve_2d;
+ tcm->reserve_1d = sita_reserve_1d;
+ tcm->free = sita_free;
+ tcm->deinit = sita_deinit;
+ tcm->pvt = (void *)pvt;
+
+ spin_lock_init(&(pvt->lock));
+
+	/* Create the tcm map */
+ pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
+ if (!pvt->map)
+ goto error;
+
+ for (i = 0; i < tcm->width; i++) {
+ pvt->map[i] =
+ kmalloc(sizeof(**pvt->map) * tcm->height,
+ GFP_KERNEL);
+ if (pvt->map[i] == NULL) {
+ while (i--)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ goto error;
+ }
+ }
+
+ if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
+ pvt->div_pt.x = attr->x;
+ pvt->div_pt.y = attr->y;
+
+ } else {
+ /* Defaulting to 3:1 ratio on width for 2D area split */
+ /* Defaulting to 3:1 ratio on height for 2D and 1D split */
+ pvt->div_pt.x = (tcm->width * 3) / 4;
+ pvt->div_pt.y = (tcm->height * 3) / 4;
+ }
+
+ spin_lock(&(pvt->lock));
+ assign(&area, 0, 0, width - 1, height - 1);
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+ return tcm;
+
+error:
+ kfree(tcm);
+ kfree(pvt);
+ return NULL;
+}
+
+static void sita_deinit(struct tcm *tcm)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ area.p1.x = tcm->width - 1;
+ area.p1.y = tcm->height - 1;
+
+ spin_lock(&(pvt->lock));
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+
+ for (i = 0; i < tcm->height; i++)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ kfree(pvt);
+}
+
+/**
+ * Reserve a 1D area in the container
+ *
+ * @param num_slots size of 1D area
+ * @param area pointer to the area that will be populated with the
+ * reserved area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct tcm_area field = {0};
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* Scanning entire container */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
+
+ ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
+
+/**
+ * Reserve a 2D area in the container
+ *
+ * @param w width
+ * @param h height
+ * @param area pointer to the area that will be populated with the reserved
+ * area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* not supporting more than 64 as alignment */
+ if (align > 64)
+ return -EINVAL;
+
+ /* we prefer 1, 32 and 64 as alignment */
+ align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
+
+ spin_lock(&(pvt->lock));
+ ret = scan_areas_and_find_fit(tcm, w, h, align, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
+
+/**
+ * Unreserve a previously allocated 2D or 1D area
+ * @param area area to be freed
+ * @return 0 - success
+ */
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* check that this is in fact an existing area */
+ WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
+ pvt->map[area->p1.x][area->p1.y] != area);
+
+ /* Clear the contents of the associated tiles in the map */
+ fill_area(tcm, area, NULL);
+
+ spin_unlock(&(pvt->lock));
+
+ return 0;
+}
+
+/**
+ * Note: In general, the coordinates in the scan field are relative to the
+ * scan sweep direction. The scan origin (e.g. top-left corner) will always be
+ * the p0 member of the field. Therefore, for a scan from top-left, p0.x <= p1.x
+ * and p0.y <= p1.y; whereas, for a scan from bottom-right, p1.x <= p0.x and
+ * p1.y <= p0.y. For example, a right-to-left, bottom-to-top scan of the whole
+ * container uses p0 = (width - 1, height - 1) and p1 = (0, 0).
+ */
+
+/**
+ * Raster scan horizontally right to left from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p0.x < field->p1.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ /* adjust start_x and end_y, as allocation would not fit beyond */
+	start_x = ALIGN_DOWN(start_x - w + 1, align); /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ /* scan field top-to-bottom, right-to-left */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_T2B, &best))
+ goto done;
+
+ /* change upper x bound */
+ end_x = x + 1;
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally left to right from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and end_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* scan field top-to-bottom, left-to-right */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_T2B, &best))
+ goto done;
+ /* change upper x bound */
+ end_x = x - 1;
+
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 1D area of given size inside a scan field.
+ *
+ * @param num_slots size of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best
+ * position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 found = 0;
+ s16 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area *p;
+
+ /* check scan area co-ordinates */
+ if (field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ /**
+	 * Currently we only support a full-width 1D scan field, which makes sense
+ * since 1D slot-ordering spans the full container width.
+ */
+ if (tcm->width != field->p0.x - field->p1.x + 1)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
+ return -ENOSPC;
+
+ x = field->p0.x;
+ y = field->p0.y;
+
+ /* find num_slots consecutive free slots to the left */
+ while (found < num_slots) {
+ if (y < 0)
+ return -ENOSPC;
+
+ /* remember bottom-right corner */
+ if (found == 0) {
+ area->p1.x = x;
+ area->p1.y = y;
+ }
+
+ /* skip busy regions */
+ p = pvt->map[x][y];
+ if (p) {
+ /* move to left of 2D areas, top left of 1D */
+ x = p->p0.x;
+ if (!p->is2d)
+ y = p->p0.y;
+
+ /* start over */
+ found = 0;
+ } else {
+ /* count consecutive free slots */
+ found++;
+ if (found == num_slots)
+ break;
+ }
+
+ /* move to the left */
+ if (x == 0)
+ y--;
+ x = (x ? : tcm->width) - 1;
+
+ }
+
+ /* set top-left corner */
+ area->p0.x = x;
+ area->p0.y = y;
+ return 0;
+}
+
+/**
+ * Find a place for a 2D area of given size inside a scan field based on its
+ * alignment needs.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area)
+{
+ s32 ret = 0;
+ struct tcm_area field = {0};
+ u16 boundary_x, boundary_y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ if (align > 1) {
+ /* prefer top-left corner */
+ boundary_x = pvt->div_pt.x - 1;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > pvt->div_pt.x)
+ boundary_x = tcm->width - 1;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, 0, 0, boundary_x, boundary_y);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != tcm->width - 1 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+ }
+ } else if (align == 1) {
+ /* prefer top-right corner */
+ boundary_x = pvt->div_pt.x;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > (tcm->width - pvt->div_pt.x))
+ boundary_x = 0;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != 0 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field,
+ area);
+ }
+ }
+
+ return ret;
+}
+
+/* check if an entire area is free */
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
+{
+ u16 x = 0, y = 0;
+ for (y = y0; y < y0 + h; y++) {
+ for (x = x0; x < x0 + w; x++) {
+ if (map[x][y])
+ return false;
+ }
+ }
+ return true;
+}
+
+/* fills an area with a parent tcm_area */
+static void fill_area(struct tcm *tcm, struct tcm_area *area,
+ struct tcm_area *parent)
+{
+ s32 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area a, a_;
+
+ /* set area's tcm; otherwise, enumerator considers it invalid */
+ area->tcm = tcm;
+
+ tcm_for_each_slice(a, *area, a_) {
+ for (x = a.p0.x; x <= a.p1.x; ++x)
+ for (y = a.p0.y; y <= a.p1.y; ++y)
+ pvt->map[x][y] = parent;
+
+ }
+}
+
+/**
+ * Compares a candidate area to the current best area, and if it is a better
+ * fit, it updates the best to this one.
+ *
+ * @param x0, y0, w, h top, left, width, height of candidate area
+ * @param field scan field
+ * @param criteria scan criteria
+ * @param best best candidate and its scores
+ *
+ * @return 1 (true) if the candidate area is known to be the final best, so no
+ * more searching should be performed
+ */
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best)
+{
+ struct score me; /* score for area */
+
+ /*
+	 * NOTE: For horizontal bias we always take the first candidate found,
+	 * because our scan is horizontal-raster-based and the first candidate
+	 * will always have the horizontal bias.
+ */
+ bool first = criteria & CR_BIAS_HORIZONTAL;
+
+ assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
+
+ /* calculate score for current candidate */
+ if (!first) {
+ get_neighbor_stats(tcm, &me.a, &me.n);
+ me.neighs = me.n.edge + me.n.busy;
+ get_nearness_factor(field, &me.a, &me.f);
+ }
+
+ /* the 1st candidate is always the best */
+ if (!best->a.tcm)
+ goto better;
+
+ BUG_ON(first);
+
+ /* diagonal balance check */
+ if ((criteria & CR_DIAGONAL_BALANCE) &&
+ best->neighs <= me.neighs &&
+ (best->neighs < me.neighs ||
+ /* this implies that neighs and occupied match */
+ best->n.busy < me.n.busy ||
+ (best->n.busy == me.n.busy &&
+ /* check the nearness factor */
+ best->f.x + best->f.y > me.f.x + me.f.y)))
+ goto better;
+
+ /* not better, keep going */
+ return 0;
+
+better:
+ /* save current area as best */
+ memcpy(best, &me, sizeof(me));
+ best->a.tcm = tcm;
+ return first;
+}
+
+/**
+ * Calculate the nearness factor of an area in a search field. The nearness
+ * factor is smaller if the area is closer to the search origin.
+ */
+static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
+ struct nearness_factor *nf)
+{
+ /**
+ * Using signed math as field coordinates may be reversed if
+ * search direction is right-to-left or bottom-to-top.
+ */
+ nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
+ (field->p1.x - field->p0.x);
+ nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
+ (field->p1.y - field->p0.y);
+}
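
A rough worked example of the scaling above (values chosen only for illustration): scanning left to right over a field spanning x = 0..63, a candidate whose top-left corner sits at x = 16 gets nf->x = (16 - 0) * 1000 / (63 - 0) = 253, while one at x = 48 gets 761. Since update_candidate() favours the smaller f.x + f.y sum when the neighbor counts tie, the candidate nearer the scan origin wins.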
+
+/* get neighbor statistics */
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat)
+{
+ s16 x = 0, y = 0;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+	/* Clear any existing values */
+ memset(stat, 0, sizeof(*stat));
+
+ /* process top & bottom edges */
+ for (x = area->p0.x; x <= area->p1.x; x++) {
+ if (area->p0.y == 0)
+ stat->edge++;
+ else if (pvt->map[x][area->p0.y - 1])
+ stat->busy++;
+
+ if (area->p1.y == tcm->height - 1)
+ stat->edge++;
+ else if (pvt->map[x][area->p1.y + 1])
+ stat->busy++;
+ }
+
+ /* process left & right edges */
+ for (y = area->p0.y; y <= area->p1.y; ++y) {
+ if (area->p0.x == 0)
+ stat->edge++;
+ else if (pvt->map[area->p0.x - 1][y])
+ stat->busy++;
+
+ if (area->p1.x == tcm->width - 1)
+ stat->edge++;
+ else if (pvt->map[area->p1.x + 1][y])
+ stat->busy++;
+ }
+}
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
new file mode 100644
index 00000000000..0444f868671
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.h
@@ -0,0 +1,95 @@
+/*
+ * tcm_sita.h
+ *
+ * SImple Tiler Allocator (SiTA) private structures.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TCM_SITA_H
+#define _TCM_SITA_H
+
+#include "tcm.h"
+
+/* length between two coordinates */
+#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
+
+enum criteria {
+ CR_MAX_NEIGHS = 0x01,
+ CR_FIRST_FOUND = 0x10,
+ CR_BIAS_HORIZONTAL = 0x20,
+ CR_BIAS_VERTICAL = 0x40,
+ CR_DIAGONAL_BALANCE = 0x80
+};
+
+/* nearness to the beginning of the search field from 0 to 1000 */
+struct nearness_factor {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Statistics on immediately neighboring slots. Edge is the number of
+ * border segments that are also border segments of the scan field. Busy
+ * refers to the number of neighbors that are occupied.
+ */
+struct neighbor_stats {
+ u16 edge;
+ u16 busy;
+};
+
+/* structure to keep the score of a potential allocation */
+struct score {
+ struct nearness_factor f;
+ struct neighbor_stats n;
+ struct tcm_area a;
+ u16 neighs; /* number of busy neighbors */
+};
+
+struct sita_pvt {
+ spinlock_t lock; /* spinlock to protect access */
+ struct tcm_pt div_pt; /* divider point splitting container */
+ struct tcm_area ***map; /* pointers to the parent area for each slot */
+};
+
+/* assign coordinates to area */
+static inline
+void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
+{
+ a->p0.x = x0;
+ a->p0.y = y0;
+ a->p1.x = x1;
+ a->p1.y = y1;
+}
+
+#endif
diff --git a/drivers/gpu/drm/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
new file mode 100644
index 00000000000..a8d5ce47686
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/tcm.h
@@ -0,0 +1,328 @@
+/*
+ * tcm.h
+ *
+ * TILER container manager specification and support functions for TI
+ * TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_H
+#define TCM_H
+
+struct tcm;
+
+/* point */
+struct tcm_pt {
+ u16 x;
+ u16 y;
+};
+
+/* 1d or 2d area */
+struct tcm_area {
+ bool is2d; /* whether area is 1d or 2d */
+ struct tcm *tcm; /* parent */
+ struct tcm_pt p0;
+ struct tcm_pt p1;
+};
+
+struct tcm {
+ u16 width, height; /* container dimensions */
+ int lut_id; /* Lookup table identifier */
+
+ unsigned int y_offset; /* offset to use for y coordinates */
+
+ /* 'pvt' structure shall contain any tcm details (attr) along with
+ linked list of allocated areas and mutex for mutually exclusive access
+ to the list. It may also contain copies of width and height to notice
+ any changes to the publicly available width and height fields. */
+ void *pvt;
+
+ /* function table */
+ s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+ struct tcm_area *area);
+ s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
+ s32 (*free) (struct tcm *tcm, struct tcm_area *area);
+ void (*deinit) (struct tcm *tcm);
+};
+
+/*=============================================================================
+ BASIC TILER CONTAINER MANAGER INTERFACE
+=============================================================================*/
+
+/*
+ * NOTE:
+ *
+ * Since some basic parameter checking is done outside the TCM algorithms,
+ * TCM implementations do NOT have to check the following:
+ *
+ *   whether the area pointer is NULL
+ *   whether width and height fit within the container
+ *   whether the number of pages exceeds the size of the container
+ *
+ */
+
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+
+
+/**
+ * Deinitialize tiler container manager.
+ *
+ * @param tcm Pointer to container manager.
+ *
+ * @return 0 on success, non-0 error value on error. The call
+ * should free as much memory as possible and is meaningful
+ * even on failure. Some error codes: -ENODEV: invalid
+ * manager.
+ */
+static inline void tcm_deinit(struct tcm *tcm)
+{
+ if (tcm)
+ tcm->deinit(tcm);
+}
+
+/**
+ * Reserves a 2D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param height Height(in pages) of area to be reserved.
+ * @param width Width(in pages) of area to be reserved.
+ * @param align Alignment requirement for top-left corner of area. Not
+ * all values may be supported by the container manager,
+ * but it must support 0 (1), 32 and 64.
+ *			A value of 0 is equivalent to 1.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
+ u16 align, struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || width == 0 || height == 0 ||
+	/* align must be a power of 2 */
+ (align & (align - 1))) ? -EINVAL :
+ (height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = true;
+ res = tcm->reserve_2d(tcm, height, width, align, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Reserves a 1D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param slots Number of (contiguous) slots to reserve.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
+ struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || slots == 0) ? -EINVAL :
+ slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = false;
+ res = tcm->reserve_1d(tcm, slots, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Free a previously reserved area from the container.
+ *
+ * @param area Pointer to area reserved by a prior call to
+ * tcm_reserve_1d or tcm_reserve_2d call, whether
+ * it was successful or not. (Note: all fields of
+ * the structure must match.)
+ *
+ * @return 0 on success. Non-0 error code on failure. Also, the tcm
+ * field of the area is set to NULL on success to avoid subsequent
+ * freeing. This call will succeed even if supplying
+ * the area from a failed reserved call.
+ */
+static inline s32 tcm_free(struct tcm_area *area)
+{
+ s32 res = 0; /* free succeeds by default */
+
+ if (area && area->tcm) {
+ res = area->tcm->free(area->tcm, area);
+ if (res == 0)
+ area->tcm = NULL;
+ }
+
+ return res;
+}
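
Putting the inline wrappers above together, a hypothetical user of the container manager might reserve and release a 2D block like this (the 256x128 container, the 16x8 block and the alignment of 32 are arbitrary illustration values):

struct tcm *tcm;
struct tcm_area area;

tcm = sita_init(256, 128, NULL);	/* NULL: use the default divider point */
if (tcm) {
	if (!tcm_reserve_2d(tcm, 16, 8, 32, &area)) {
		/* area.p0/area.p1 now bound the reserved 16x8 rectangle */
		tcm_free(&area);
	}
	tcm_deinit(tcm);
}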
+
+/*=============================================================================
+ HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
+=============================================================================*/
+
+/**
+ * This method slices off the topmost 2D slice from the parent area, and stores
+ * it in the 'slice' parameter. The 'parent' parameter will get modified to
+ * contain the remaining portion of the area. If the whole parent area can
+ * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
+ * longer a valid area.
+ *
+ * @param parent Pointer to a VALID parent area that will get modified
+ * @param slice Pointer to the slice area that will get modified
+ */
+static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
+{
+ *slice = *parent;
+
+ /* check if we need to slice */
+ if (slice->tcm && !slice->is2d &&
+ slice->p0.y != slice->p1.y &&
+ (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {
+ /* set end point of slice (start always remains) */
+ slice->p1.x = slice->tcm->width - 1;
+ slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;
+ /* adjust remaining area */
+ parent->p0.x = 0;
+ parent->p0.y = slice->p1.y + 1;
+ } else {
+ /* mark this as the last slice */
+ parent->tcm = NULL;
+ }
+}
+
+/* Verify if a tcm area is logically valid */
+static inline bool tcm_area_is_valid(struct tcm_area *area)
+{
+ return area && area->tcm &&
+ /* coordinate bounds */
+ area->p1.x < area->tcm->width &&
+ area->p1.y < area->tcm->height &&
+ area->p0.y <= area->p1.y &&
+ /* 1D coordinate relationship + p0.x check */
+ ((!area->is2d &&
+ area->p0.x < area->tcm->width &&
+ area->p0.x + area->p0.y * area->tcm->width <=
+ area->p1.x + area->p1.y * area->tcm->width) ||
+ /* 2D coordinate relationship */
+ (area->is2d &&
+ area->p0.x <= area->p1.x));
+}
+
+/* see if a coordinate is within an area */
+static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
+{
+ u16 i;
+
+ if (a->is2d) {
+ return p->x >= a->p0.x && p->x <= a->p1.x &&
+ p->y >= a->p0.y && p->y <= a->p1.y;
+ } else {
+ i = p->x + p->y * a->tcm->width;
+ return i >= a->p0.x + a->p0.y * a->tcm->width &&
+ i <= a->p1.x + a->p1.y * a->tcm->width;
+ }
+}
+
+/* calculate area width */
+static inline u16 __tcm_area_width(struct tcm_area *area)
+{
+ return area->p1.x - area->p0.x + 1;
+}
+
+/* calculate area height */
+static inline u16 __tcm_area_height(struct tcm_area *area)
+{
+ return area->p1.y - area->p0.y + 1;
+}
+
+/* calculate number of slots in an area */
+static inline u16 __tcm_sizeof(struct tcm_area *area)
+{
+ return area->is2d ?
+ __tcm_area_width(area) * __tcm_area_height(area) :
+ (area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) *
+ area->tcm->width;
+}
+#define tcm_sizeof(area) __tcm_sizeof(&(area))
+#define tcm_awidth(area) __tcm_area_width(&(area))
+#define tcm_aheight(area) __tcm_area_height(&(area))
+#define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area))
+
+/* limit a 1D area to the first N pages */
+static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
+{
+ if (__tcm_sizeof(a) < num_pg)
+ return -ENOMEM;
+ if (!num_pg)
+ return -EINVAL;
+
+ a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
+ a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
+ return 0;
+}
+
+/**
+ * Iterate through 2D slices of a valid area. Behaves
+ * syntactically as a for(;;) statement.
+ *
+ * @param var	Name of a local variable of type 'struct
+ *		tcm_area' that will get modified to
+ *		contain each slice.
+ * @param area	A VALID parent area (a struct tcm_area,
+ *		copied by value). It will not get modified
+ *		throughout the loop.
+ * @param safe	Name of a local 'struct tcm_area' variable
+ *		used as scratch space by the iteration.
+ *
+ */
+#define tcm_for_each_slice(var, area, safe) \
+ for (safe = area, \
+ tcm_slice(&safe, &var); \
+ var.tcm; tcm_slice(&safe, &var))
+
+#endif
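
A short, hypothetical sketch of walking a previously reserved area with the tcm_for_each_slice() macro above; 'area' is assumed to be a valid struct tcm_area obtained from tcm_reserve_1d() or tcm_reserve_2d():

struct tcm_area slice, scratch;

tcm_for_each_slice(slice, area, scratch) {
	/* each 'slice' is a purely 2D sub-rectangle of 'area' */
	pr_debug("slice: (%u,%u)-(%u,%u)\n",
			slice.p0.x, slice.p0.y, slice.p1.x, slice.p1.y);
}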
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 515e5ee1f9e..b1746741bc5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -187,9 +187,10 @@ out_unref:
return ret;
}
-static int radeonfb_create(struct radeon_fbdev *rfbdev,
+static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
@@ -300,22 +301,6 @@ out_unref:
return ret;
}
-static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = radeonfb_create(rfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
@@ -349,7 +334,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
- .fb_probe = radeon_fb_find_or_create_single,
+ .fb_probe = radeonfb_create,
};
int radeon_fbdev_init(struct radeon_device *rdev)
@@ -379,6 +364,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
}
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(rdev->ddev);
+
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1d8ff2f850b..93f760e27a9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -38,6 +38,7 @@
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 97993c6835f..03914953cb1 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -39,10 +39,6 @@ int tegra_drm_fb_init(struct drm_device *drm)
if (IS_ERR(fbdev))
return PTR_ERR(fbdev);
-#ifndef CONFIG_FRAMEBUFFER_CONSOLE
- drm_fbdev_cma_restore_mode(fbdev);
-#endif
-
host1x->fbdev = fbdev;
return 0;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index b9feec9d08d..9f4be3d4a02 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -476,9 +476,10 @@ udl_framebuffer_init(struct drm_device *dev,
}
-static int udlfb_create(struct udl_fbdev *ufbdev,
+static int udlfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
struct device *device = &dev->usbdev->dev;
@@ -556,27 +557,10 @@ out:
return ret;
}
-static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = udlfb_create(ufbdev, sizes);
- if (ret)
- return ret;
-
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
.gamma_set = udl_crtc_fb_gamma_set,
.gamma_get = udl_crtc_fb_gamma_get,
- .fb_probe = udl_fb_find_or_create_single,
+ .fb_probe = udlfb_create,
};
static void udl_fbdev_destroy(struct drm_device *dev,
@@ -619,6 +603,10 @@ int udl_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
return 0;
}