author    Daniel Vetter <daniel.vetter@ffwll.ch>    2013-12-09 09:17:02 +0100
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2013-12-09 09:19:14 +0100
commit    f7698ba75fa283435f5077b9dfb4319d28b9de9a (patch)
tree      4bc16a615a35baaf2b482de81cd256a69067ff72 /drivers/gpu/drm
parent    798183c54799fbe1e5a5bfabb3a8c0505ffd2149 (diff)
parent    374b105797c3d4f29c685f3be535c35f5689b30e (diff)
Merge tag 'v3.13-rc3' into drm-intel-next-queued
Linux 3.13-rc3

I need a backmerge for two reasons:

- For merging the ppgtt patches from Ben I need to pull in the bdw support.
- We now have duplicated calls to intel_uncore_forcewake_reset in the setup code due to 2 different patches merged into -next and 3.13. The conflict is silent, so I need the merge to be able to apply Deepak's fixup patch.

Conflicts:
	drivers/gpu/drm/i915/intel_display.c

Trivial conflict; it doesn't even show up in the merge diff.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_encoder_slave.c | 8
-rw-r--r--  drivers/gpu/drm/drm_flip_work.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 17
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 4
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 2
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 40
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 5
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 129
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 36
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v3_1.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 35
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 26
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 254
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 137
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 63
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 30
87 files changed, 1141 insertions(+), 467 deletions(-)
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 0cfb60f5476..d18b88b755c 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -67,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
goto fail;
}
- if (!client->driver) {
+ if (!client->dev.driver) {
err = -ENODEV;
goto fail_unregister;
}
- module = client->driver->driver.owner;
+ module = client->dev.driver->owner;
if (!try_module_get(module)) {
err = -ENODEV;
goto fail_unregister;
@@ -80,7 +80,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
encoder->bus_priv = client;
- encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+ encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
@@ -111,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
{
struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->driver->driver.owner;
+ struct module *module = client->dev.driver->owner;
i2c_unregister_device(client);
encoder->bus_priv = NULL;
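
These hunks track the i2c core change that removed the client->driver back-pointer: the bound driver is now reached through the generic struct device and converted back with to_i2c_driver(), and the module owner likewise moves from client->driver->driver.owner to client->dev.driver->owner. A minimal sketch of the new accessor path (the helper name is hypothetical):

#include <linux/i2c.h>

/* Hypothetical helper showing the new accessor path. */
static struct i2c_driver *client_to_i2c_driver(struct i2c_client *client)
{
	/* client->dev.driver stays NULL until a driver is bound */
	if (!client->dev.driver)
		return NULL;
	return to_i2c_driver(client->dev.driver);
}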
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index e788882d902..f9c7fa3d001 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -34,7 +34,7 @@
*/
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
- if (kfifo_put(&work->fifo, (const void **)&val)) {
+ if (kfifo_put(&work->fifo, val)) {
atomic_inc(&work->pending);
} else {
DRM_ERROR("%s fifo full!\n", work->name);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a7176ce254..c5b929c3f77 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -147,7 +147,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int minor_id = iminor(inode);
int err = -ENODEV;
- const struct file_operations *old_fops;
+ const struct file_operations *new_fops;
DRM_DEBUG("\n");
@@ -162,18 +162,13 @@ int drm_stub_open(struct inode *inode, struct file *filp)
if (drm_device_is_unplugged(dev))
goto out;
- old_fops = filp->f_op;
- filp->f_op = fops_get(dev->driver->fops);
- if (filp->f_op == NULL) {
- filp->f_op = old_fops;
+ new_fops = fops_get(dev->driver->fops);
+ if (!new_fops)
goto out;
- }
- if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
- fops_put(filp->f_op);
- filp->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
+ replace_fops(filp, new_fops);
+ if (filp->f_op->open)
+ err = filp->f_op->open(inode, filp);
out:
mutex_unlock(&drm_global_mutex);
return err;
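
replace_fops(), which this cleanup uses, swaps filp->f_op for the new operations and drops the reference on the old ones in one step, so the hand-rolled save/restore of old_fops disappears. A minimal sketch of the resulting open pattern (function and parameter names hypothetical):

#include <linux/fs.h>

static int example_stub_open(struct inode *inode, struct file *filp,
			     const struct file_operations *driver_fops)
{
	const struct file_operations *new_fops = fops_get(driver_fops);

	if (!new_fops)
		return -ENODEV;

	replace_fops(filp, new_fops);	/* releases the old filp->f_op */
	if (filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}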
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index d80d95289e1..64c34d5876f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -455,8 +455,8 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
- /* Fields of interlaced scanout modes are only halve a frame duration.
- * Double the dotclock to get halve the frame-/line-/pixelduration.
+ /* Fields of interlaced scanout modes are only half a frame duration.
+ * Double the dotclock to get half the frame-/line-/pixelduration.
*/
if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
dotclock *= 2;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c200136a5d8..f53d5246979 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -338,7 +338,7 @@ err_idr:
*/
static void drm_unplug_minor(struct drm_minor *minor)
{
- if (!minor || !device_is_registered(minor->kdev))
+ if (!minor || !minor->kdev)
return;
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1a35ea53106..c22c3097c3e 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -489,6 +489,11 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@@ -501,6 +506,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
int drm_sysfs_device_add(struct drm_minor *minor)
{
char *minor_str;
+ int r;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
@@ -509,14 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
- minor->kdev = device_create(drm_class, minor->dev->dev,
- MKDEV(DRM_MAJOR, minor->index),
- minor, minor_str, minor->index);
- if (IS_ERR(minor->kdev)) {
- DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
- return PTR_ERR(minor->kdev);
+ minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
+ if (!minor->kdev) {
+ r = -ENOMEM;
+ goto error;
}
+
+ device_initialize(minor->kdev);
+ minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ minor->kdev->class = drm_class;
+ minor->kdev->type = &drm_sysfs_device_minor;
+ minor->kdev->parent = minor->dev->dev;
+ minor->kdev->release = drm_sysfs_release;
+ dev_set_drvdata(minor->kdev, minor);
+
+ r = dev_set_name(minor->kdev, minor_str, minor->index);
+ if (r < 0)
+ goto error;
+
+ r = device_add(minor->kdev);
+ if (r < 0)
+ goto error;
+
return 0;
+
+error:
+ DRM_ERROR("device create failed %d\n", r);
+ put_device(minor->kdev);
+ return r;
}
/**
@@ -529,7 +555,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
void drm_sysfs_device_remove(struct drm_minor *minor)
{
if (minor->kdev)
- device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+ device_unregister(minor->kdev);
minor->kdev = NULL;
}
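
Open-coding device_create() as kzalloc() + device_initialize() + device_add() lets DRM install its own release callback and keep the struct device alive across unplug; after device_initialize(), every failure path can funnel into a single put_device(), which frees the allocation through that callback. The idiom in isolation (names hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

static void example_release(struct device *dev)
{
	kfree(dev);			/* pairs with the kzalloc() below */
}

static struct device *example_add_device(struct class *class,
					 struct device *parent, dev_t devt)
{
	struct device *dev;
	int r;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->release = example_release;

	r = dev_set_name(dev, "example%d", MINOR(devt));
	if (r < 0)
		goto error;

	r = device_add(dev);
	if (r < 0)
		goto error;

	return dev;

error:
	put_device(dev);		/* frees via example_release() */
	return ERR_PTR(r);
}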
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 3a1e6d9b25f..b676006a95a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -285,7 +285,11 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
return drm_platform_init(&exynos_drm_driver, pdev);
}
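
Assigning coherent_dma_mask directly skips the check that the platform can actually satisfy the mask; dma_set_coherent_mask() validates it and returns -EIO when it cannot, which is why the probe now propagates the error. Sketch of the checked form (names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* fails with -EIO if 32-bit coherent DMA is not supported */
	int ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (ret)
		return ret;

	/* ... remainder of probe ... */
	return 0;
}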
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3271fd4b172..7bccedca487 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -383,6 +383,8 @@ out:
g2d_userptr->npages,
g2d_userptr->vma);
+ exynos_gem_put_vma(g2d_userptr->vma);
+
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 45d5af0546b..5b646c1f0c3 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -39,7 +39,7 @@
#include "psb_intel_reg.h"
#include "mdfld_output.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 27d3875d895..368a03ae301 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -26,7 +26,7 @@
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include "mid_bios.h"
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 1eb86c79523..e2810706114 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -99,7 +99,7 @@ static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
i2c_dev->status = I2C_STAT_INIT;
i2c_dev->msg = pmsg;
i2c_dev->buf_offset = 0;
- INIT_COMPLETION(i2c_dev->complete);
+ reinit_completion(&i2c_dev->complete);
/* Enable I2C transaction */
temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
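
The INIT_COMPLETION() macro was replaced tree-wide by the reinit_completion() inline, which resets the done counter before a completion object is reused for another transaction. The reuse pattern, sketched with hypothetical names:

#include <linux/completion.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(xfer_done);	/* hypothetical completion */

static int example_start_xfer(void)
{
	reinit_completion(&xfer_done);	/* clear leftover completions */
	/* ... start hardware; the ISR calls complete(&xfer_done) ... */
	if (!wait_for_completion_timeout(&xfer_done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}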
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 4c17c93d8d1..5e069786273 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -22,7 +22,7 @@
#include <linux/i2c.h>
#include <drm/drmP.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include "intel_bios.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 64ed8f4d991..780f815b6c9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1842,6 +1842,7 @@ struct drm_i915_file_private {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 57fe1ae32a0..dfff0907f70 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -193,16 +193,14 @@ out:
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, intel_handle;
- acpi_status status;
+ acpi_handle dhandle;
int ret;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM")) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6dd622d733b..e4fba39631a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
- dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ /*
+ * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+ * clock for LVDS.
+ */
+ dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
+ !HAS_PCH_SPLIT(dev));
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cc0a63349a9..86dc6ecd357 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1413,6 +1413,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
default:
break;
}
+
+ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ }
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1f7af63b31d..596ad09f0e5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6705,6 +6705,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6712,6 +6715,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6749,6 +6755,9 @@ static void hsw_update_package_c8(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
if (!i915_enable_pc8)
return;
@@ -6772,6 +6781,9 @@ done:
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
@@ -6782,6 +6794,9 @@ static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
@@ -7375,7 +7390,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR(pipe));
I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -7404,7 +7421,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR_IVB(pipe));
I915_WRITE(CURBASE_IVB(pipe), base);
+ POSTING_READ(CURBASE_IVB(pipe));
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -9439,8 +9458,7 @@ check_crtc_state(struct drm_device *dev)
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config &&
- encoder->get_hw_state(encoder, &pipe))
+ if (encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
@@ -11107,8 +11125,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
- if (encoder->get_config)
- encoder->get_config(encoder, &crtc->config);
+ encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
}
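
Two patterns recur in this file: the HAS_PC8() early-returns confine the package-C8 paths to hardware that has the feature, and the cursor hunks bracket the CURBASE write with POSTING_READ() so the MMIO writes reach the hardware immediately rather than sitting posted in a write buffer. The generic posting idiom, sketched outside the i915 register macros:

#include <linux/io.h>

static void example_posted_write(void __iomem *regs, u32 reg, u32 val)
{
	writel(val, regs + reg);
	(void)readl(regs + reg);	/* read back to post the write */
}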
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d45b311c1ab..f3b17b11b27 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1776,7 +1776,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 853d13ea055..3da259e280b 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -621,7 +621,7 @@ static void intel_didl_outputs(struct drm_device *dev)
u32 temp;
int i = 0;
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ac4a74a4134..41b6e080e36 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1632,7 +1632,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
&to_intel_crtc(enabled)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->htotal;
- int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -3858,7 +3858,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 18c406246a2..22cf0f4ba24 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
}
+static void
+intel_tv_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@@ -1621,6 +1628,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->get_config = intel_tv_get_config;
intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index b737a32dd39..feb2d669254 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -301,6 +301,19 @@ static void gen6_force_wake_work(struct work_struct *work)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
+
void intel_uncore_early_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -318,19 +331,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
-}
-static void intel_uncore_forcewake_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- __gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
+ intel_uncore_forcewake_reset(dev);
}
void intel_uncore_sanitize(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 606598f226f..8d06eef2b9e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -256,7 +256,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index e21453a9497..9ac94d4e564 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -494,13 +494,6 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
- if (stat & 0x00200000) {
- if (mthd == 0x0054) {
- if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
- show &= ~0x00200000;
- }
- }
-
if (stat & 0x00800000) {
if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index fcd449e5aba..04f412922d2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -481,13 +481,6 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
- if (stat & 0x00200000) {
- if (mthd == 0x0054) {
- if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
- show &= ~0x00200000;
- }
- }
-
if (stat & 0x00800000) {
if (!nve0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 64dca260912..fe67415c3e1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -1039,7 +1039,7 @@ nvc0_grctx_generate_r406800(struct nvc0_graph_priv *priv)
} while (!tpcnr[gpc]);
tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
- tpc_set |= 1 << ((gpc * 8) + tpc);
+ tpc_set |= 1ULL << ((gpc * 8) + tpc);
}
nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index e286e132c7e..129120473f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
acpi_handle handle;
int ret;
- handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
+ handle = ACPI_HANDLE(&device->pdev->dev);
if (!handle)
return false;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
index 9908f1f05a0..d4fd3bc9c66 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -32,6 +32,11 @@ nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
struct nouveau_subdev *subdev = nv_subdev(ppwr);
u32 addr;
+ /* wait for a free slot in the fifo */
+ addr = nv_rd32(ppwr, 0x10a4a0);
+ if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
+ return -EBUSY;
+
/* we currently only support a single process at a time waiting
* on a synchronous reply, take the PPWR mutex and tell the
* receive handler what we're waiting for
@@ -42,11 +47,6 @@ nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
ppwr->recv.process = process;
}
- /* wait for a free slot in the fifo */
- addr = nv_rd32(ppwr, 0x10a4a0);
- if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
- return -EBUSY;
-
/* acquire data segment access */
do {
nv_wr32(ppwr, 0x10a580, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 21b2b3021fa..80e584a1bd1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -117,7 +117,8 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
priv->fan->bios.linear_max_temp) {
duty = nouveau_therm_update_linear(therm);
} else {
- duty = priv->cstate;
+ if (priv->cstate)
+ duty = priv->cstate;
poll = false;
}
immd = false;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 13b85007644..e44ed7b93c6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -41,7 +41,8 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
if (!client)
return false;
- if (!client->driver || client->driver->detect(client, info)) {
+ if (!client->dev.driver ||
+ to_i2c_driver(client->dev.driver)->detect(client, info)) {
i2c_unregister_device(client);
return false;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 3621e7f2347..6828d81ed7b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -298,7 +298,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- if (device->card_type < NV_C0) {
+ if (device->card_type < NV_10) {
init->subchan[0].handle = 0x00000000;
init->subchan[0].grclass = 0x0000;
init->subchan[1].handle = NvSw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f9a2df29a59..95c74045404 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -253,18 +253,15 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, nvidia_handle;
- acpi_status status;
+ acpi_handle dhandle;
int retval = 0;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM"))
return false;
- }
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -417,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
return false;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -451,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle)
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 949ab0cbc4a..c0fde6b9393 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,12 +98,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
if (tile) {
spin_lock(&drm->tile.lock);
- if (fence) {
- /* Mark it as pending. */
- tile->fence = fence;
- nouveau_fence_ref(fence);
- }
-
+ tile->fence = nouveau_fence_ref(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -1462,14 +1457,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
+ struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
struct nouveau_fence *old_fence = NULL;
- if (likely(fence))
- nouveau_fence_ref(fence);
-
spin_lock(&nvbo->bo.bdev->fence_lock);
old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = fence;
+ nvbo->bo.sync_obj = new_fence;
spin_unlock(&nvbo->bo.bdev->fence_lock);
nouveau_fence_unref(&old_fence);
@@ -1552,7 +1545,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+ else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ nvbo->page_shift == vma->vm->vmm->spg_shift) {
if (node->sg)
nouveau_vm_map_sg_table(vma, 0, size, node);
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e84f4c32331..cc5152be2cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -346,22 +346,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING(chan, 0x00000000);
- /* allocate software object class (used for fences on <= nv05, and
- * to signal flip completion), bind it to a subchannel.
- */
- if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
+ /* allocate software object class (used for fences on <= nv05) */
+ if (device->card_type < NV_10) {
ret = nouveau_object_new(nv_object(client), chan->handle,
- NvSw, nouveau_abi16_swclass(chan->drm),
- NULL, 0, &object);
+ NvSw, 0x006e, NULL, 0, &object);
if (ret)
return ret;
swch = (void *)object->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = chan;
- }
- if (device->card_type < NV_C0) {
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 44642d9094e..7809d92183c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,7 +26,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "nouveau_fbcon.h"
#include "dispnv04/hw.h"
@@ -399,6 +398,11 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ if (nv_device(drm->device)->chipset < 0x11)
+ dev->mode_config.async_page_flip = false;
+ else
+ dev->mode_config.async_page_flip = true;
+
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
@@ -555,19 +559,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
goto fail;
/* Emit the pageflip */
- ret = RING_SPACE(chan, 3);
+ ret = RING_SPACE(chan, 2);
if (ret)
goto fail;
- if (nv_device(drm->device)->card_type < NV_C0) {
+ if (nv_device(drm->device)->card_type < NV_C0)
BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING (chan, 0x00000000);
- OUT_RING (chan, 0x00000000);
- } else {
- BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
- OUT_RING (chan, 0);
- BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
- }
+ else
+ BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
ret = nouveau_fence_new(chan, false, pfence);
@@ -584,22 +584,16 @@ fail:
int
nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+ struct drm_pending_vblank_event *event, u32 flags)
{
+ const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan = NULL;
+ struct nouveau_channel *chan = drm->channel;
struct nouveau_fence *fence;
- struct ttm_validate_buffer resv[2] = {
- { .bo = &old_bo->bo },
- { .bo = &new_bo->bo },
- };
- struct ww_acquire_ctx ticket;
- LIST_HEAD(res);
int ret;
if (!drm->channel)
@@ -609,26 +603,22 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (!s)
return -ENOMEM;
- /* Choose the channel the flip will be handled in */
- spin_lock(&old_bo->bo.bdev->fence_lock);
- fence = new_bo->bo.sync_obj;
- if (fence)
- chan = fence->channel;
- if (!chan)
- chan = drm->channel;
- spin_unlock(&old_bo->bo.bdev->fence_lock);
+ /* synchronise rendering channel with the kernel's channel */
+ spin_lock(&new_bo->bo.bdev->fence_lock);
+ fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+ spin_unlock(&new_bo->bo.bdev->fence_lock);
+ ret = nouveau_fence_sync(fence, chan);
+ if (ret)
+ return ret;
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
if (ret)
goto fail_free;
-
- list_add(&resv[1].head, &res);
}
- list_add(&resv[0].head, &res);
mutex_lock(&chan->cli->mutex);
- ret = ttm_eu_reserve_buffers(&ticket, &res);
+ ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
if (ret)
goto fail_unpin;
@@ -640,12 +630,29 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
- ret = nv50_display_flip_next(crtc, fb, chan, 0);
+ ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
if (ret)
goto fail_unreserve;
} else {
struct nv04_display *dispnv04 = nv04_display(dev);
- nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
+ int head = nouveau_crtc(crtc)->index;
+
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ goto fail_unreserve;
+
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
+ }
+
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -656,14 +663,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Update the crtc struct and cleanup */
crtc->fb = fb;
- ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
nouveau_fence_unref(&fence);
return 0;
fail_unreserve:
- ttm_eu_backoff_reservation(&ticket, &res);
+ ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
mutex_unlock(&chan->cli->mutex);
if (old_bo != new_bo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 690d5930ce3..984004d66a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -51,9 +51,11 @@ enum {
NvSubCtxSurf2D = 0,
NvSubSw = 1,
NvSubImageBlit = 2,
- NvSub2D = 3,
NvSubGdiRect = 3,
- NvSubCopy = 4,
+
+ NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+ NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+ FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
};
/* Object handles. */
@@ -194,7 +196,6 @@ WIND_RING(struct nouveau_channel *chan)
#define NV84_SUBCHAN_UEVENT 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
-#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2418b0de589..7a3759f1c41 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,7 @@
#include <engine/device.h>
#include <engine/disp.h>
#include <engine/fifo.h>
+#include <engine/software.h>
#include <subdev/vm.h>
@@ -191,6 +192,32 @@ nouveau_accel_init(struct nouveau_drm *drm)
return;
}
+ ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
+ nouveau_abi16_swclass(drm), NULL, 0, &object);
+ if (ret == 0) {
+ struct nouveau_software_chan *swch = (void *)object->parent;
+ ret = RING_SPACE(drm->channel, 2);
+ if (ret == 0) {
+ if (device->card_type < NV_C0) {
+ BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
+ OUT_RING (drm->channel, NVDRM_NVSW);
+ } else
+ if (device->card_type < NV_E0) {
+ BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
+ OUT_RING (drm->channel, 0x001f0000);
+ }
+ }
+ swch = (void *)object->parent;
+ swch->flip = nouveau_flip_complete;
+ swch->flip_data = drm->channel;
+ }
+
+ if (ret) {
+ NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
+ nouveau_accel_fini(drm);
+ return;
+ }
+
if (device->card_type < NV_C0) {
ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
&drm->notify);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 71ed2dadae6..4b0fb6c66be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -56,6 +56,7 @@ enum nouveau_drm_handle {
NVDRM_CONTROL = 0xdddddddc,
NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
NVDRM_CHAN = 0xcccc0000, /* |= client chid */
+ NVDRM_NVSW = 0x55550000,
};
struct nouveau_cli {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 34b82711e7c..40cf52e6d6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -306,7 +306,8 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
- kref_get(&fence->kref);
+ if (fence)
+ kref_get(&fence->kref);
return fence;
}
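
Making nouveau_fence_ref() tolerate NULL (the way kfree() does) lets the nouveau_bo and nouveau_gem hunks drop their per-call-site "if (fence)" guards. The pattern in isolation, with hypothetical names:

#include <linux/kref.h>

struct example_fence {
	struct kref kref;
};

/* NULL-tolerant ref: a possibly-NULL pointer passes straight through */
static struct example_fence *example_fence_ref(struct example_fence *fence)
{
	if (fence)
		kref_get(&fence->kref);
	return fence;
}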
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 418a6177a65..78a27f8ad7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -106,8 +106,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
if (mapped) {
spin_lock(&nvbo->bo.bdev->fence_lock);
- if (nvbo->bo.sync_obj)
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+ fence = nouveau_fence_ref(nvbo->bo.sync_obj);
spin_unlock(&nvbo->bo.bdev->fence_lock);
}
@@ -309,7 +308,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
list_for_each_safe(entry, tmp, list) {
nvbo = list_entry(entry, struct nouveau_bo, entry);
- nouveau_bo_fence(nvbo, fence);
+ if (likely(fence))
+ nouveau_bo_fence(nvbo, fence);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
@@ -438,8 +438,7 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
int ret = 0;
spin_lock(&nvbo->bo.bdev->fence_lock);
- if (nvbo->bo.sync_obj)
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+ fence = nouveau_fence_ref(nvbo->bo.sync_obj);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (fence) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 38a4db5bfe2..4aff04fa483 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -630,7 +630,6 @@ error:
hwmon->hwmon = NULL;
return ret;
#else
- hwmon->hwmon = NULL;
return 0;
#endif
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 77dcc9c5077..8fe32bbed99 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -255,6 +255,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, NvCtxSurf2D);
BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
OUT_RING(chan, 3);
+ if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, 2);
+ }
BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
OUT_RING(chan, NvGdiRect);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index acf667859cb..701c4c10e08 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -664,8 +664,9 @@ static int omap_dmm_probe(struct platform_device *dev)
}
/* set dma mask for device */
- /* NOTE: this is a workaround for the hwmod not initializing properly */
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0109a9644cb..821ab7b9409 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -92,6 +92,7 @@ qxl_release_free(struct qxl_device *qdev,
- DRM_FILE_OFFSET);
qxl_fence_remove_release(&bo->fence, release->id);
qxl_bo_unref(&bo);
+ kfree(entry);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index deaf98cdca3..0652ee0a209 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -56,8 +56,10 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
return -EINVAL;
}
args.ucRegIndex = buf[0];
- if (num > 1)
- memcpy(&out, &buf[1], num - 1);
+ if (num > 1) {
+ num--;
+ memcpy(&out, &buf[1], num);
+ }
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ae92aa041c6..b43a3a3c906 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
* cik_mm_rdoorbell - read a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
*
* Returns the value in the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
- if (offset < rdev->doorbell.size) {
- return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ return readl(rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
}
@@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
* cik_mm_wdoorbell - write a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
- if (offset < rdev->doorbell.size) {
- writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ writel(v, rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
}
@@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 4) {
@@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 2) {
@@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
}
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
+/* TODO: figure out why semaphore cause lockups */
+#if 0
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+ return true;
+#else
+ return false;
+#endif
}
/**
@@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
- WDOORBELL32(ring->doorbell_offset, ring->wptr);
+ WDOORBELL32(ring->doorbell_index, ring->wptr);
}
/**
@@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
return r;
}
- /* doorbell offset */
- rdev->ring[idx].doorbell_offset =
- (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
-
/* init the mqd struct */
memset(buf, 0, sizeof(struct bonaire_mqd));
@@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
mqd->queue_state.cp_hqd_pq_doorbell_control |=
- DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
+ DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
mqd->queue_state.cp_hqd_pq_doorbell_control &=
~(DOORBELL_SOURCE | DOORBELL_HIT);
@@ -7839,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
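
The doorbell hunks at the top of this diff move the interface from byte offsets into the aperture to dword indices: ptr becomes a u32 __iomem *, so pointer arithmetic scales by sizeof(u32) on its own and the bounds check compares an index against num_doorbells. The accessors, sketched with hypothetical types:

#include <linux/io.h>

struct example_doorbell {
	u32 __iomem *ptr;	/* u32 *, so ptr + index scales by 4 */
	u32 num_doorbells;
};

static u32 example_rdoorbell(struct example_doorbell *db, u32 index)
{
	if (index < db->num_doorbells)
		return readl(db->ptr + index);
	return 0;		/* out-of-range read */
}

static void example_wdoorbell(struct example_doorbell *db, u32 index, u32 v)
{
	if (index < db->num_doorbells)
		writel(v, db->ptr + index);
}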
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 9c9529de20e..0300727a4f7 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring wait on or signal
* other rings (CIK).
*/
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
radeon_ring_write(ring, addr & 0xfffffff8);
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+ return true;
}
/**
@@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 91bb470de0a..920e1e4a52c 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
static int cypress_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
u32 tmp;
udelay(10);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 52f1ae16f65..9702e55e924 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1174,23 +1174,16 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
- u16 ctl, v;
- int err;
-
- err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
- if (err)
- return;
-
- v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+ int readrq;
+ u16 v;
+ readrq = pcie_get_readrq(rdev->pdev);
+ v = ffs(readrq) - 8;
/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
* to avoid hangs or perfomance issues
*/
- if ((v == 0) || (v == 6) || (v == 7)) {
- ctl &= ~PCI_EXP_DEVCTL_READRQ;
- ctl |= (2 << 12);
- pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
- }
+ if ((v == 0) || (v == 6) || (v == 7))
+ pcie_set_readrq(rdev->pdev, 512);
}
void dce4_program_fmt(struct drm_encoder *encoder)
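
pcie_get_readrq()/pcie_set_readrq() work in bytes, so the open-coded DEVCTL read-modify-write disappears; "ffs(readrq) - 8" recovers the DEVCTL encoding (128 bytes encodes as 0, 256 as 1, 512 as 2, and so on) purely for the validity test, and the 512-byte fallback matches the old "2 << 12" write. The fixup in isolation (name hypothetical):

#include <linux/bitops.h>
#include <linux/pci.h>

static void example_fix_readrq(struct pci_dev *pdev)
{
	int readrq = pcie_get_readrq(pdev);	/* in bytes: 128..4096 */
	u16 v = ffs(readrq) - 8;		/* back to the DEVCTL field */

	/* 0 and the reserved encodings 6/7 are the problem values here */
	if (v == 0 || v == 6 || v == 7)
		pcie_set_readrq(pdev, 512);
}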
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 6a0656d00ed..a37b5443638 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f2633902815..cdc003085a7 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3445,9 +3445,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
static int ni_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-#if defined(CONFIG_ACPI)
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
(perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
if (eg_pi->pcie_performance_request_registered == false)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 784983d7815..10abc4d5a6c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
+ return false;
}
int r100_copy_blit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4e609e8a8d2..9ad06732a78 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+ return true;
}
/**
@@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 3b317456512..7844d15c139 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (r6xx-SI).
*/
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+
+ return true;
}
/**
@@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b9ee9925860..ecf2a3960c0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_locked(struct radeon_fence *fence);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
@@ -548,17 +549,20 @@ struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter);
+ int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
@@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* GPU doorbell structures, functions & helpers
*/
+#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
+
struct radeon_doorbell {
- u32 num_pages;
- bool free[1024];
/* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- void __iomem *ptr;
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
+ unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
};
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@@ -765,7 +771,6 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
- struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
@@ -799,8 +804,7 @@ struct radeon_ring {
u32 pipe;
u32 queue;
struct radeon_bo *mqd_obj;
- u32 doorbell_page_num;
- u32 doorbell_offset;
+ u32 doorbell_index;
unsigned wptr_offs;
};
@@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1638,7 +1641,7 @@ struct radeon_asic_ring {
/* command emit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1979,6 +1982,7 @@ struct cik_asic {
unsigned tile_config;
uint32_t tile_mode_array[32];
+ uint32_t macrotile_mode_array[16];
};
union radeon_asic_config {
@@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
@@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
-#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
-#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
+#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
+#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
/*
* Indirect registers accessor
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 10f98c7742d..98a9074b306 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
return NOTIFY_DONE;
/* Check pending SBIOS requests */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
count = radeon_atif_get_sbios_requests(handle, &req);
if (count <= 0)
@@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
struct radeon_atcs *atcs = &rdev->atcs;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
u32 retry = 3;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
int ret;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
/* No need to proceed if we're sure that ATIF is not supported */
if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 50853c0cb49..e354ce94cdd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
@@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2833ee3a61..c9fd97b5807 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
@@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
/* uvd v3.1 */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6153ec18943..9d302eaeea1 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -8,8 +8,7 @@
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <linux/pci.h>
#include "radeon_acpi.h"
@@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
acpi_handle dhandle, atpx_handle;
acpi_status status;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -493,7 +492,7 @@ static int radeon_atpx_init(void)
*/
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
else
return VGA_SWITCHEROO_DIS;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c155d6f3fa6..b3633d9a531 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 26ca223d12d..f41594b2eea 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+ radeon_semaphore_sync_to(p->ib.semaphore,
+ p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_ib_sync_to(&parser->ib, vm->fence);
- radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
- rdev, vm, parser->ring));
+ radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+ radeon_semaphore_sync_to(parser->ib.semaphore,
+ radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b9234c43f43..39b033b441d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/
int radeon_doorbell_init(struct radeon_device *rdev)
{
- int i;
-
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
- /* limit to 4 MB for now */
- if (rdev->doorbell.size > (4 * 1024 * 1024))
- rdev->doorbell.size = 4 * 1024 * 1024;
+ rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
+ if (rdev->doorbell.num_doorbells == 0)
+ return -EINVAL;
- rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
+ rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
- rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
+ memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- rdev->doorbell.free[i] = true;
- }
return 0;
}
@@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
}
/**
- * radeon_doorbell_get - Allocate a doorbell page
+ * radeon_doorbell_get - Allocate a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Allocate a doorbell page for use by the driver (all asics).
+ * Allocate a doorbell for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
- int i;
-
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- if (rdev->doorbell.free[i]) {
- rdev->doorbell.free[i] = false;
- *doorbell = i;
- return 0;
- }
+ unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
+ if (offset < rdev->doorbell.num_doorbells) {
+ __set_bit(offset, rdev->doorbell.used);
+ *doorbell = offset;
+ return 0;
+ } else {
+ return -EINVAL;
}
- return -EINVAL;
}
/**
- * radeon_doorbell_free - Free a doorbell page
+ * radeon_doorbell_free - Free a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Free a doorbell page allocated for use by the driver (all asics)
+ * Free a doorbell allocated for use by the driver (all asics)
*/
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
- if (doorbell < rdev->doorbell.num_pages)
- rdev->doorbell.free[doorbell] = true;
+ if (doorbell < rdev->doorbell.num_doorbells)
+ __clear_bit(doorbell, rdev->doorbell.used);
}
/*
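The doorbell rework above replaces the per-page bool array with a bitmap of u32 slots capped at RADEON_MAX_DOORBELLS. A minimal usage sketch, assuming only names introduced by this patch (error handling abbreviated):

	u32 index;
	int r;

	r = radeon_doorbell_get(rdev, &index);
	if (r)
		return r;		/* all RADEON_MAX_DOORBELLS slots in use */

	ring->doorbell_index = index;	/* replaces doorbell_page_num/doorbell_offset */
	WDOORBELL32(index, ring->wptr);	/* index now selects a u32 in doorbell.ptr[] */

	radeon_doorbell_free(rdev, index);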
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1aee32213f6..9f5ff28864f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -76,9 +76,10 @@
* 2.32.0 - new info request for rings working
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
+ * 2.35.0 - Add CIK macrotile mode array query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 34
+#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 281d14c22a4..d3a86e43c01 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -472,6 +472,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
}
/**
+ * radeon_fence_wait_locked - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ *
+ * Wait for the requested fence to signal (all asics).
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_locked(struct radeon_fence *fence)
+{
+ uint64_t seq[RADEON_NUM_RINGS] = {};
+ int r;
+
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence: %p!\n", fence);
+ return -EINVAL;
+ }
+
+ seq[fence->ring] = fence->seq;
+ if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
+
+ r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
+ if (r)
+ return r;
+
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
+/**
* radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 8a83b89d470..3044e504f4e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -651,7 +651,7 @@ retry:
radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
0, pd_entries, 0, 0);
- radeon_ib_sync_to(&ib, vm->fence);
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -1209,6 +1209,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
return -ENOMEM;
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
ib.length_dw = 0;
r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1220,7 +1222,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, radeon_vm_page_flags(bo_va->flags));
- radeon_ib_sync_to(&ib, vm->fence);
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bb8710531a1..55d0b474bd3 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_BACKEND_MAP:
if (rdev->family >= CHIP_BONAIRE)
- return -EINVAL;
+ *value = rdev->config.cik.backend_map;
else if (rdev->family >= CHIP_TAHITI)
*value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
@@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
+ if (rdev->family >= CHIP_BONAIRE) {
+ value = rdev->config.cik.macrotile_mode_array;
+ value_size = sizeof(uint32_t)*16;
+ } else {
+ DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
+ return -EINVAL;
+ }
+ break;
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
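For reference, a hedged userspace sketch of querying the new array through the radeon info ioctl; the header path and error handling are assumptions, while RADEON_INFO_CIK_MACROTILE_MODE_ARRAY is the request added alongside the 2.35 version bump:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <radeon_drm.h>			/* header location assumed */

	static int query_macrotile(int fd, uint32_t array[16])
	{
		struct drm_radeon_info info = {
			.request = RADEON_INFO_CIK_MACROTILE_MODE_ARRAY,
			.value = (uintptr_t)array,	/* kernel copies 16 dwords here */
		};

		return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
	}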
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 0c7b8c66301..0b158f98d28 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+retry:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
&base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
+
+ /* On old GPUs like RN50 with little vram, pinning can fail because
+ * the current fb is taking all the space needed. So instead of
+ * unpinning the old buffer after pinning the new one, first unpin
+ * the old one and then retry pinning the new one.
+ *
+ * As only the master can set a mode, only the master can pin, and
+ * it is unlikely the master client will race with itself, especially
+ * on those old GPUs with a single crtc.
+ *
+ * We don't shut down the display controller because the new buffer
+ * will end up in the same spot.
+ */
+ if (!atomic && fb && fb != crtc->fb) {
+ struct radeon_bo *old_rbo;
+ unsigned long nsize, osize;
+
+ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+ osize = radeon_bo_size(old_rbo);
+ nsize = radeon_bo_size(rbo);
+ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+ radeon_bo_unpin(old_rbo);
+ radeon_bo_unreserve(old_rbo);
+ fb = NULL;
+ goto retry;
+ }
+ }
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 866ace070b9..d1385ccc672 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1252,7 +1252,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_CAYMAN:
- case CHIP_ARUBA:
case CHIP_BONAIRE:
case CHIP_KABINI:
case CHIP_KAVERI:
@@ -1284,6 +1283,7 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
+ case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 18254e1c3e7..9214403ae17 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
- int i, r;
+ int r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
@@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- ib->sync_to[i] = NULL;
return 0;
}
@@ -109,25 +107,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
- * radeon_ib_sync_to - sync to fence before executing the IB
- *
- * @ib: IB object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence before executing the IB
- */
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = ib->sync_to[fence->ring];
- ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
@@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- bool need_sync = false;
- int i, r = 0;
+ int r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothing in the ib we should report. */
@@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_fence *fence = ib->sync_to[i];
- if (radeon_fence_need_sync(fence, ib->ring)) {
- need_sync = true;
- radeon_semaphore_sync_rings(rdev, ib->semaphore,
- fence->ring, ib->ring);
- radeon_fence_note_sync(fence, ib->ring);
- }
- }
- /* immediately free semaphore when we don't need to sync */
- if (!need_sync) {
- radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
}
+
/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush for every IB */
if (ib->vm /*&& !ib->vm->last_flush*/) {
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 8dcc20f53d7..2b42aa1914f 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -29,12 +29,12 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
-
+#include "radeon_trace.h"
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- int r;
+ int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
@@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev,
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ (*semaphore)->sync_to[i] = NULL;
+
return 0;
}
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- --semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_signale(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
+ --semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
}
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- ++semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_wait(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
+ ++semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ *
+ * @semaphore: semaphore object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using this semaphore object
+ */
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = semaphore->sync_to[fence->ring];
+ semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
-/* caller must hold ring lock */
+/**
+ * radeon_semaphore_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @semaphore: semaphore object to use for sync
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter)
+ int ring)
{
- int r;
+ int i, r;
- /* no need to signal and wait on the same ring */
- if (signaler == waiter) {
- return 0;
- }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = semaphore->sync_to[i];
- /* prevent GPU deadlocks */
- if (!rdev->ring[signaler].ready) {
- dev_err(rdev->dev, "Trying to sync to a disabled ring!");
- return -EINVAL;
- }
+ /* check if we really need to sync */
+ if (!radeon_fence_need_sync(fence, ring))
+ continue;
- r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
- if (r) {
- return r;
- }
- radeon_semaphore_emit_signal(rdev, signaler, semaphore);
- radeon_ring_commit(rdev, &rdev->ring[signaler]);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Syncing to a disabled ring!");
+ return -EINVAL;
+ }
- /* we assume caller has already allocated space on waiters ring */
- radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* allocate enough space for sync command */
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+ if (r) {
+ return r;
+ }
- /* for debugging lockup only, used by sysfs debug files */
- rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
- rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+ /* emit the signal semaphore */
+ if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+ /* signaling wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ /* we assume the caller has already allocated space on the waiter's ring */
+ if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+ /* waiting wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[i]);
+ radeon_fence_note_sync(fence, ring);
+ }
return 0;
}
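Together with the copy-function hunks above, the new flow is: record fences with radeon_semaphore_sync_to(), then let radeon_semaphore_sync_rings() decide per ring whether to emit semaphores or fall back to a CPU-side radeon_fence_wait_locked(). A minimal sketch of a caller, assuming only names this series introduces (the copy functions above omit the sync_rings error check):

	struct radeon_semaphore *sem = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	radeon_semaphore_sync_to(sem, *fence);	/* remember newest fence per ring */
	r = radeon_semaphore_sync_rings(rdev, sem, ring->idx);
	if (r) {
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	/* ... emit commands and a new fence on ring->idx ... */

	radeon_semaphore_free(rdev, &sem, *fence);	/* freed once the fence signals */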
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 811bca691b3..9f0e18172b6 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -111,6 +111,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
TP_ARGS(dev, seqno)
);
+DECLARE_EVENT_CLASS(radeon_semaphore_request,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem),
+
+ TP_STRUCT__entry(
+ __field(int, ring)
+ __field(signed, waiters)
+ __field(uint64_t, gpu_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->waiters = sem->waiters;
+ __entry->gpu_addr = sem->gpu_addr;
+ ),
+
+ TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+ __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index f9b02e3d683..aca8cbe8a33 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 8e8f4613353..59be2cfcbb4 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 9364129ba29..d700698a1f2 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
pi->enable_gfx_clock_gating = true;
- pi->enable_mg_clock_gating = true;
- pi->enable_gfx_dynamic_mgpg = true; /* ??? */
- pi->override_dynamic_mgpg = true;
+ pi->enable_mg_clock_gating = false;
+ pi->enable_gfx_dynamic_mgpg = false;
+ pi->override_dynamic_mgpg = false;
pi->enable_auto_thermal_throttling = true;
pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
pi->uvd_dpm = true; /* ??? */
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d978..d4a68af1a27 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+ return true;
}
/**
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
index 5b6fa1f62d4..d722db2cf34 100644
--- a/drivers/gpu/drm/radeon/uvd_v3_1.c
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -37,7 +37,7 @@
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+ return true;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d5a646ebe6..07e02c4bf5a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
atomic_dec(&bo->glob->bo_count);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
-
+ mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
@@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
+ mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ /*
+ * In the absence of a wait_unlocked API,
+ * use the bo::wu_mutex to avoid triggering livelocks due to
+ * concurrent use of this function. Note that this use of
+ * bo::wu_mutex can go away if we change locking order to
+ * mmap_sem -> bo::reserve.
+ */
+ ret = mutex_lock_interruptible(&bo->wu_mutex);
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+ if (!ww_mutex_is_locked(&bo->resv->lock))
+ goto out_unlock;
+ ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ ww_mutex_unlock(&bo->resv->lock);
+
+out_unlock:
+ mutex_unlock(&bo->wu_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4834c463c38..15b86a94949 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -350,10 +350,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out2;
/*
- * Move nonexistent data. NOP.
+ * Don't move nonexistent data. Clear destination instead.
*/
- if (old_iomap == NULL && ttm == NULL)
+ if (old_iomap == NULL &&
+ (ttm == NULL || ttm->state == tt_unpopulated)) {
+ memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
+ }
/*
* TTM might be null for moves within the same region.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ac617f3ecd0..b249ab9b1eb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after scheduling.
+ * for reserve, and if it fails, retry the fault after waiting
+ * for the buffer to become unreserved.
*/
-
- ret = ttm_bo_reserve(bo, true, true, false, 0);
+ ret = ttm_bo_reserve(bo, true, true, false, NULL);
if (unlikely(ret != 0)) {
- if (ret == -EBUSY)
- set_need_resched();
+ if (ret != -EBUSY)
+ return VM_FAULT_NOPAGE;
+
+ if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ up_read(&vma->vm_mm->mmap_sem);
+ (void) ttm_bo_wait_unreserved(bo);
+ }
+
+ return VM_FAULT_RETRY;
+ }
+
+ /*
+ * If we'd want to change locking order to
+ * mmap_sem -> bo::reserve, we'd use a blocking reserve here
+ * instead of retrying the fault...
+ */
return VM_FAULT_NOPAGE;
}
@@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
case 0:
break;
case -EBUSY:
- set_need_resched();
case -ERESTARTSYS:
retval = VM_FAULT_NOPAGE;
goto out_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5..479e9418e3d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
- struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
- ww_acquire_fini(ticket);
+ ttm_eu_backoff_reservation_locked(list);
+ if (ticket)
+ ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
- ww_acquire_init(ticket, &reservation_ww_class);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
if (entry->reserved)
continue;
-
- ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+ ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+ ticket);
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
+ BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
}
}
- ww_acquire_done(ticket);
+ if (ticket)
+ ww_acquire_done(ticket);
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
err:
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
- ww_acquire_done(ticket);
- ww_acquire_fini(ticket);
+ if (ticket) {
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
+ }
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
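The NULL-ticket handling added throughout this file lets a caller reserve a buffer list without a ww_acquire context, trading -EDEADLK backoff for plain blocking reserves. A hedged sketch of that calling convention ('bo' is a hypothetical buffer object; list setup abbreviated):

	struct ttm_validate_buffer val_buf = { .bo = bo };
	struct list_head val_list;
	int ret;

	INIT_LIST_HEAD(&val_list);
	list_add(&val_buf.head, &val_list);

	ret = ttm_eu_reserve_buffers(NULL, &val_list);	/* no ticket: no backoff */
	if (ret)
		return ret;

	/* ... validate and use the buffers ... */

	ttm_eu_backoff_reservation(NULL, &val_list);	/* NULL ticket accepted too */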
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
- ww_acquire_fini(ticket);
+ if (ticket)
+ ww_acquire_fini(ticket);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176c258..6fe7b92a82d 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
*/
/** @file ttm_ref_object.c
*
@@ -34,6 +40,7 @@
* and release on file close.
*/
+
/**
* struct ttm_object_file
*
@@ -84,6 +91,9 @@ struct ttm_object_device {
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
+ struct dma_buf_ops ops;
+ void (*dmabuf_release)(struct dma_buf *dma_buf);
+ size_t dma_buf_size;
};
/**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
struct ttm_object_file *tfile;
};
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
@@ -416,9 +428,10 @@ out_err:
}
EXPORT_SYMBOL(ttm_object_file_init);
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
- *mem_glob,
- unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
+ if (ret != 0)
+ goto out_no_object_hash;
- if (likely(ret == 0))
- return tdev;
+ tdev->ops = *ops;
+ tdev->dmabuf_release = tdev->ops.release;
+ tdev->ops.release = ttm_prime_dmabuf_release;
+ tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+ ttm_round_pot(sizeof(struct file));
+ return tdev;
+out_no_object_hash:
kfree(tdev);
return NULL;
}
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+ return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_prime_object *prime;
+
+ *p_base = NULL;
+ prime = container_of(base, struct ttm_prime_object, base);
+ BUG_ON(prime->dma_buf != NULL);
+ mutex_destroy(&prime->mutex);
+ if (prime->refcount_release)
+ prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf:
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct ttm_prime_object *prime =
+ (struct ttm_prime_object *) dma_buf->priv;
+ struct ttm_base_object *base = &prime->base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ if (tdev->dmabuf_release)
+ tdev->dmabuf_release(dma_buf);
+ mutex_lock(&prime->mutex);
+ if (prime->dma_buf == dma_buf)
+ prime->dma_buf = NULL;
+ mutex_unlock(&prime->mutex);
+ ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+ ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ struct ttm_base_object *base;
+ int ret;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &tdev->ops) {
+ dma_buf_put(dma_buf);
+ return -ENOSYS;
+ }
+
+ prime = (struct ttm_prime_object *) dma_buf->priv;
+ base = &prime->base;
+ *handle = base->hash.key;
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+
+ dma_buf_put(dma_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ int ret;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL ||
+ base->object_type != ttm_prime_type)) {
+ ret = -ENOENT;
+ goto out_unref;
+ }
+
+ prime = container_of(base, struct ttm_prime_object, base);
+ if (unlikely(!base->shareable)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+
+ ret = mutex_lock_interruptible(&prime->mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_unref;
+ }
+
+ dma_buf = prime->dma_buf;
+ if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+
+ /*
+ * Need to create a new dma_buf, with memory accounting.
+ */
+ ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ dma_buf = dma_buf_export(prime, &tdev->ops,
+ prime->size, flags);
+ if (IS_ERR(dma_buf)) {
+ ret = PTR_ERR(dma_buf);
+ ttm_mem_global_free(tdev->mem_glob,
+ tdev->dma_buf_size);
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ /*
+ * dma_buf has taken the base object reference
+ */
+ base = NULL;
+ prime->dma_buf = dma_buf;
+ }
+ mutex_unlock(&prime->mutex);
+
+ ret = dma_buf_fd(dma_buf, flags);
+ if (ret >= 0) {
+ *prime_fd = ret;
+ ret = 0;
+ } else
+ dma_buf_put(dma_buf);
+
+out_unref:
+ if (base)
+ ttm_base_object_unref(&base);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+ struct ttm_prime_object *prime, bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ mutex_init(&prime->mutex);
+ prime->size = PAGE_ALIGN(size);
+ prime->real_type = type;
+ prime->dma_buf = NULL;
+ prime->refcount_release = refcount_release;
+ return ttm_base_object_init(tfile, &prime->base, shareable,
+ ttm_prime_type,
+ ttm_prime_refcount_release,
+ ref_obj_release);
+}
+EXPORT_SYMBOL(ttm_prime_object_init);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd91ac1..9f8b690bcf5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o
+ vmwgfx_surface.o vmwgfx_prime.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 20d5485eaf9..c7a549694e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->tdev = ttm_object_device_init
- (dev_priv->mem_global_ref.object, 12);
+ (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_MODESET,
+ DRIVER_MODESET | DRIVER_PRIME,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
@@ -1235,6 +1235,9 @@ static struct drm_driver driver = {
.dumb_map_offset = vmw_dumb_map_offset,
.dumb_destroy = vmw_dumb_destroy,
+ .prime_fd_to_handle = vmw_prime_fd_to_handle,
+ .prime_handle_to_fd = vmw_prime_handle_to_fd,
+
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5dbcb9..db85985c708 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -819,6 +819,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
/**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+
+/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 00000000000..31fe32d8d65
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_object.h>
+
+/*
+ * DMA-BUF attach- and mapping methods. No need to implement
+ * these until we have other virtual devices using them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
+{
+ return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgb,
+ enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+ return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops = {
+ .attach = vmw_prime_map_attach,
+ .detach = vmw_prime_map_detach,
+ .map_dma_buf = vmw_prime_map_dma_buf,
+ .unmap_dma_buf = vmw_prime_unmap_dma_buf,
+ .release = NULL,
+ .kmap = vmw_prime_dmabuf_kmap,
+ .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
+ .kunmap = vmw_prime_dmabuf_kunmap,
+ .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+ .mmap = vmw_prime_dmabuf_mmap,
+ .vmap = vmw_prime_dmabuf_vmap,
+ .vunmap = vmw_prime_dmabuf_vunmap,
+};
+
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
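The two ioctl entry points above are thin wrappers: all of the real work lives in the new TTM prime helpers ttm_prime_fd_to_handle()/ttm_prime_handle_to_fd(). For orientation, here is a minimal userspace sketch of the generic PRIME ioctl these wrappers sit behind. It uses only the standard drm.h UAPI (struct drm_prime_handle, DRM_IOCTL_PRIME_HANDLE_TO_FD); the function name and error handling are illustrative assumptions, not part of this patch.

/* Minimal sketch: export a buffer handle as a PRIME fd.
 * Header path may vary; libdrm installs the UAPI as <drm/drm.h>. */
#include <stdint.h>
#include <string.h>
#include <fcntl.h>		/* O_CLOEXEC, used by DRM_CLOEXEC */
#include <sys/ioctl.h>
#include <drm/drm.h>

static int prime_export(int drm_fd, uint32_t handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.flags = DRM_CLOEXEC;	/* close-on-exec on the exported fd */

	/* On a vmwgfx device this ends up in vmw_prime_handle_to_fd(). */
	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
		return -1;
	return args.fd;
}

Importing runs the same struct through DRM_IOCTL_PRIME_FD_TO_HANDLE, landing in vmw_prime_fd_to_handle() above.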
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 252501a54de..efe2b74c5eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,7 +35,7 @@
#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_dma_buffer dma;
};
@@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != converter->object_type))
+ if (unlikely(ttm_base_object_type(base) != converter->object_type))
goto out_bad_resource;
res = converter->base_obj_to_res(base);
@@ -387,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- ttm_base_object_kfree(vmw_user_bo, base);
+ ttm_prime_object_kfree(vmw_user_bo, prime);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -401,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
if (unlikely(base == NULL))
return;
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
bo = &vmw_user_bo->dma.base;
ttm_bo_unref(&bo);
}
@@ -442,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
return ret;
tmp = ttm_bo_reference(&user_bo->dma.base);
- ret = ttm_base_object_init(tfile,
- &user_bo->base,
- shareable,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*p_dma_buf = &user_bo->dma;
- *handle = user_bo->base.hash.key;
+ *handle = user_bo->prime.base.hash.key;
out_no_base_object:
return ret;
@@ -475,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo);
- return (vmw_user_bo->base.tfile == tfile ||
- vmw_user_bo->base.shareable) ? 0 : -EPERM;
+ return (vmw_user_bo->prime.base.tfile == tfile ||
+ vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return -ESRCH;
}
- if (unlikely(base->object_type != ttm_buffer_type)) {
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
@@ -562,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
- return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL);
}
/*
@@ -807,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv,
goto out_no_dmabuf;
tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
+ args->size,
+ &vmw_user_bo->prime,
+ false,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0))
goto out_no_base_object;
- args->handle = vmw_user_bo->base.hash.key;
+ args->handle = vmw_user_bo->prime.base.hash.key;
out_no_base_object:
ttm_bo_unref(&tmp);
@@ -994,7 +999,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
*/
static int
vmw_resource_check_buffer(struct vmw_resource *res,
- struct ww_acquire_ctx *ticket,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@@ -1011,7 +1015,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1029,7 +1033,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
@@ -1074,8 +1078,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
* @val_buf: Backup buffer information.
*/
static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@@ -1084,7 +1087,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@@ -1099,14 +1102,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
- struct ww_acquire_ctx ticket;
int ret;
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- ret = vmw_resource_check_buffer(res, &ticket, interruptible,
- &val_buf);
+ ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1121,7 +1122,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
- vmw_resource_backoff_reservation(&ticket, &val_buf);
+ vmw_resource_backoff_reservation(&val_buf);
return ret;
}
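Two mechanical changes run through this file. First, the user-visible buffer now embeds a struct ttm_prime_object whose first member is the old ttm_base_object, so every container_of() gains a "prime." level in its member path. Second, the eviction path stops threading a ww_acquire_ctx ticket and passes NULL to ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation(), since reserving a single buffer needs no deadlock-avoidance ticket. A compilable stand-alone sketch of the container_of() change follows; struct layouts are abridged stand-ins, and the assumption that ttm_prime_object embeds its base object first is taken from this patch's container_of() usage.

/* build: cc -o nest nest.c  (userspace stand-in; kernel types abridged) */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_base_object { int refcount; };		/* abridged */
struct ttm_prime_object { struct ttm_base_object base; };	/* base first */

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;	/* was: struct ttm_base_object base */
	int dma;			/* stand-in for struct vmw_dma_buffer */
};

int main(void)
{
	struct vmw_user_dma_buffer bo = { .prime.base.refcount = 1 };
	struct ttm_base_object *base = &bo.prime.base;

	/* The member path gains one level; the arithmetic is unchanged. */
	struct vmw_user_dma_buffer *back =
		container_of(base, struct vmw_user_dma_buffer, prime.base);

	printf("%s\n", back == &bo ? "round-trip ok" : "broken");
	return 0;
}

The same nesting rewrite repeats verbatim for struct vmw_user_surface in the next file.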
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 58281433974..7de2ea8bd55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -38,7 +38,7 @@
* @size: TTM accounting size for the surface.
*/
struct vmw_user_surface {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
- return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+ return &(container_of(base, struct vmw_user_surface,
+ prime.base)->srf.res);
}
/**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
+ container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->snooper.crtc = NULL;
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
/**
* From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->sid = user_srf->base.hash.key;
+ rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
out_no_offsets:
kfree(srf->sizes);
out_no_sizes:
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+ TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;