From 37e680a15ff6375ff02773161f817e90a38c51f7 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Thu, 7 Jun 2012 15:38:42 +0100
Subject: drm/i915: Introduce drm_i915_gem_object_ops

In order to specialise functions depending upon the type of object, we
can attach vfuncs to each object via a new ->ops pointer. For instance,
this will be used in future patches to only bind pages from a dma-buf
for the duration that the object is used by the GPU - and so prevent
them from pinning those pages for the entire lifetime of the object.

v2: Bonus comments.

Signed-off-by: Chris Wilson
Reviewed-by: Ben Widawsky
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_drv.h | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_drv.h')

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 26c6959a524..ada8ea172d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -899,9 +899,29 @@ enum i915_cache_level {
 	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
+struct drm_i915_gem_object_ops {
+	/* Interface between the GEM object and its backing storage.
+	 * get_pages() is called once prior to the use of the associated set
+	 * of pages, before binding them into the GTT, and put_pages() is
+	 * called after we no longer need them. As we expect there to be
+	 * an associated cost with migrating pages between the backing storage
+	 * and making them available for the GPU (e.g. clflush), we may hold
+	 * onto the pages after they are no longer referenced by the GPU
+	 * in case they may be used again shortly (for example migrating the
+	 * pages to a different memory domain within the GTT). put_pages()
+	 * will therefore most likely be called when the object itself is
+	 * being released or under memory pressure (where we attempt to
+	 * reap pages for the shrinker).
+	 */
+	int (*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *);
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
+	const struct drm_i915_gem_object_ops *ops;
+
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
 	struct list_head gtt_list;
@@ -1306,7 +1326,8 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_object_init(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1319,7 +1340,7 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
-int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
--
cgit v1.2.3
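
As a rough illustration of how the new ->ops hook is meant to be consumed
(this sketch is not part of the patch; the *_example names are hypothetical),
a backing-storage provider would define its own ops table and hand it to the
extended i915_gem_object_init():

	#include "i915_drv.h"

	/* Hypothetical sketch only: a backing-storage implementation supplying
	 * its get_pages()/put_pages() callbacks.  These names do not exist in
	 * the kernel tree.
	 */
	static int i915_gem_object_get_pages_example(struct drm_i915_gem_object *obj)
	{
		/* Acquire the backing pages and make them available to the GPU. */
		return 0;
	}

	static void i915_gem_object_put_pages_example(struct drm_i915_gem_object *obj)
	{
		/* Release the backing pages once the GPU no longer references them. */
	}

	static const struct drm_i915_gem_object_ops i915_gem_object_example_ops = {
		.get_pages = i915_gem_object_get_pages_example,
		.put_pages = i915_gem_object_put_pages_example,
	};

	/* An object constructor would then pass &i915_gem_object_example_ops to
	 * i915_gem_object_init(obj, ...), and the generic
	 * i915_gem_object_get_pages() helper can dispatch through
	 * obj->ops->get_pages() without knowing which backing storage is in use.
	 */

Keeping the ops pointer const and per-object is what lets later patches give
dma-buf-backed objects a different page-binding policy than shmem-backed ones
without branching inside the common GEM paths.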