author	Thomas Hellstrom <thellstrom@vmware.com>	2011-10-04 20:13:30 +0200
committer	Dave Airlie <airlied@redhat.com>	2011-10-05 10:17:22 +0100
commit	e2fa3a76839ada0d788549607263a036aa654243 (patch)
tree	24883fcf9f80483aed537661a49ed389d0dff671	/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
parent	e93daed8e2fd5ce3dc98efe9938426127a534ccc (diff)
vmwgfx: Fix up query processing
Previously, query results could be placed in any buffer object, but since we
didn't allow pinned buffer objects, query results could be written when that
buffer was evicted, corrupting data in other buffers.

Now, require that buffers holding query results are no more than two pages
large, and allow a single pinned buffer of that kind. When the command
submission code encounters query result structures in other buffers, the
queries in the pinned buffer will be finished using a query barrier for the
last hardware context using the buffer. Also, if the command submission code
detects that a new hardware context is used for queries, all queries of the
previous hardware context are flushed as well. Currently we use waiting for a
no-op occlusion query as a query barrier for a specific context.

The query buffer is also flushed and unpinned on context destruction, master
drops and before scanout bo placement.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c	44
1 file changed, 44 insertions, 0 deletions
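Before the diff itself, a minimal C sketch (not part of the commit) of the ordering the hunks below establish in the dmabuf placement helpers: the pinned query bo is flushed and unpinned before the target buffer is reserved and validated. The helper name do_placement_sketch() is hypothetical; vmw_execbuf_release_pinned_bo(), ttm_bo_reserve(), ttm_bo_validate() and ttm_bo_unreserve() are used with the same signatures that appear in the patch.

/*
 * Hypothetical sketch only; assumes the includes already used by
 * vmwgfx_dmabuf.c (vmwgfx_drv.h, drm/ttm/ttm_placement.h).
 */
static int do_placement_sketch(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       bool interruptible)
{
	int ret;

	/*
	 * Finish outstanding queries and unpin the single query bo first,
	 * so a pinned query buffer can never block this placement change.
	 */
	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}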
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 7f744a82892..3fa884db08a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -42,6 +42,7 @@
  * May only be called by the current master since it assumes that the
  * master lock is the current master's lock.
  * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
@@ -59,6 +60,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
+	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
 		goto err;
@@ -78,6 +81,7 @@ err:
  * May only be called by the current master since it assumes that the
  * master lock is the current master's lock.
  * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
  *
  * @dev_priv: Driver private.
  * @buf: DMA buffer to move.
@@ -100,6 +104,9 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
+	if (pin)
+		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
 		goto err;
@@ -177,6 +184,7 @@ int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
  * May only be called by the current master since it assumes that the
  * master lock is the current master's lock.
  * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
  *
  * @dev_priv: Driver private.
  * @buf: DMA buffer to move.
@@ -205,6 +213,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
+	if (pin)
+		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
 		goto err_unlock;
@@ -276,3 +287,36 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 		ptr->offset = 0;
 	}
 }
+
+
+/**
+ * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ *
+ * @bo: The buffer object. Must be reserved, and present either in VRAM
+ * or GMR memory.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+{
+	uint32_t pl_flags;
+	struct ttm_placement placement;
+	uint32_t old_mem_type = bo->mem.mem_type;
+	int ret;
+
+	BUG_ON(!atomic_read(&bo->reserved));
+	BUG_ON(old_mem_type != TTM_PL_VRAM &&
+	       old_mem_type != VMW_PL_FLAG_GMR);
+
+	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+	if (pin)
+		pl_flags |= TTM_PL_FLAG_NO_EVICT;
+
+	memset(&placement, 0, sizeof(placement));
+	placement.num_placement = 1;
+	placement.placement = &pl_flags;
+
+	ret = ttm_bo_validate(bo, &placement, false, true, true);
+
+	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}