author    Chris Wilson <chris@chris-wilson.co.uk>  2014-05-21 11:07:24 +0200
committer Show Liu <show.liu@linaro.org>            2014-06-18 12:05:04 +0800
commit    408228a9e6ed4cacc269e38aff83e25e7ce07fa4 (patch)
tree      8831a1396a603b26c6d62fc3abe3752c369e4013 /drivers/gpu/drm/i915/i915_gem.c
parent    c590400d6bc68eacc56d6163c4cb4d995b3a11d6 (diff)
drm/i915: Fix unsafe loop iteration over vma whilst unbinding them
This is commit df6f783a4ef6790780a67c491897ac upstream.

On non-LLC platforms, when changing the cache level of an object, we may
need to unbind it so that prefetching across page boundaries does not
cross into a different memory domain. This requires us to unbind
conflicting vma, but we did so iterating over the object's vma in an
unsafe manner (as the list was being modified as we iterated).

The regression was introduced in

commit 3089c6f239d7d2c4cb2dd5c353e8984cf79af1d7
Author: Ben Widawsky <ben@bwidawsk.net>
Date:   Wed Jul 31 17:00:03 2013 -0700

    drm/i915: make caching operate on all address spaces

apparently as far back as v3.12-rc1, but it has only just begun to
trigger real world bug reports.

Reported-and-tested-by: Nikolay Martynov <mar.kolya@gmail.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=76384
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
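The core of the fix below is the switch from list_for_each_entry() to
list_for_each_entry_safe(), which caches the next element before the loop
body runs, so unbinding (and thereby unlinking) the current vma cannot
corrupt the walk. As a minimal, self-contained userspace sketch of the same
hazard and its fix (illustrative only: the struct node list and the
odd-value pruning below are not part of the driver):

/*
 * Illustrative only: frees elements of a singly linked list while
 * walking it.  The unsafe form would read cur->next *after* cur has
 * been freed; the safe form caches the successor first, which is the
 * same idea as list_for_each_entry_safe() in <linux/list.h>.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	/* Build the list 0 -> 1 -> 2 -> 3 -> 4. */
	struct node *head = NULL;
	for (int i = 4; i >= 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/*
	 * Unsafe (what the old loop amounted to):
	 *	for (cur = head; cur; cur = cur->next)
	 *		if (cur->val & 1) { unlink and free cur; }
	 * cur->next is evaluated after cur was freed.
	 *
	 * Safe: fetch the successor before the body may free cur.
	 */
	struct node *cur, *next, **link = &head;
	for (cur = head; cur; cur = next) {
		next = cur->next;		/* cached before any free() */
		if (cur->val & 1) {
			*link = next;		/* unlink cur */
			free(cur);		/* cur is never touched again */
		} else {
			link = &cur->next;
		}
	}

	for (cur = head; cur; cur = cur->next)
		printf("%d ", cur->val);	/* prints: 0 2 4 */
	printf("\n");

	for (cur = head; cur; cur = next) {	/* free the survivors */
		next = cur->next;
		free(cur);
	}
	return 0;
}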
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00c83615472..3ecb332e7cf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3529,7 +3529,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct i915_vma *vma;
+	struct i915_vma *vma, *next;
 	int ret;
 
 	if (obj->cache_level == cache_level)
@@ -3540,7 +3540,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		return -EBUSY;
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
 			ret = i915_vma_unbind(vma);
 			if (ret)