author | Chris Wilson | 2010-09-22 10:31:52 +0100 |
---|---|---|
committer | Chris Wilson | 2010-09-22 10:31:52 +0100 |
commit | dfaae392f4461785eb1c92aeaf2a1040b184edba (patch) | |
tree | 2239f155fdbed50f82d218b3499d06fb7c68f288 /drivers | |
parent | 9e0ae53404700f1e4ae1f33b0ff92948ae0e509d (diff) | |
drm/i915: Clear the gpu_write_list on resetting write_domain upon hang
Otherwise we will hit a list handling assertion when moving the object
to the inactive list.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
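The bug is a list-bookkeeping invariant: a buffer object with a pending GPU write sits on a per-ring gpu_write_list, and moving such an object to the inactive list while it is still linked there trips the driver's list-handling assertion. Clearing write_domain on a hang therefore also has to unlink the object from gpu_write_list. The sketch below is a minimal userspace illustration of that invariant, not the i915 code: the hand-rolled list helpers, `gem_object`, `move_to_inactive()` and `reset_object()` are stand-ins for the kernel's `list_head` API and the driver structures, and the `assert()` only approximates the kernel-side check.

```c
/*
 * Minimal userspace sketch (NOT the i915 code): gem_object, the list
 * helpers, move_to_inactive() and reset_object() are simplified
 * stand-ins for the kernel's list_head API and the driver structures.
 */
#include <assert.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Stand-in for a GEM object: linked on a state list (active/flushing/
 * inactive) and, while a GPU write is outstanding, on a per-ring
 * gpu_write_list as well. */
struct gem_object {
	struct list_head list;
	struct list_head gpu_write_list;
	unsigned int write_domain;
};

/* Models i915_gem_object_move_to_inactive(): the object must no longer
 * have a pending GPU write; the assert approximates the kernel-side
 * list-handling assertion mentioned in the commit message. */
static void move_to_inactive(struct gem_object *obj, struct list_head *inactive)
{
	assert(obj->write_domain == 0);
	assert(list_empty(&obj->gpu_write_list));
	list_del_init(&obj->list);
	list_add_tail(&obj->list, inactive);
}

/* The pattern the patch applies on reset: drop the write domain AND
 * unlink the object from the gpu_write_list before going inactive.
 * Without the list_del_init() the assert above would fire. */
static void reset_object(struct gem_object *obj, struct list_head *inactive)
{
	obj->write_domain = 0;
	list_del_init(&obj->gpu_write_list);
	move_to_inactive(obj, inactive);
}

int main(void)
{
	struct list_head flushing, inactive, ring_writes;
	struct gem_object obj;

	INIT_LIST_HEAD(&flushing);
	INIT_LIST_HEAD(&inactive);
	INIT_LIST_HEAD(&ring_writes);
	INIT_LIST_HEAD(&obj.list);
	INIT_LIST_HEAD(&obj.gpu_write_list);

	/* Object has a write outstanding when the "hang" happens. */
	obj.write_domain = 1;
	list_add_tail(&obj.list, &flushing);
	list_add_tail(&obj.gpu_write_list, &ring_writes);

	reset_object(&obj, &inactive);
	printf("ring write list empty after reset: %d\n",
	       list_empty(&ring_writes));
	return 0;
}
```

The `write_domain = 0` plus `list_del_init(&obj->gpu_write_list)` pairing in `reset_object()` is the same pairing the patch adds for the ring active lists and the flushing list in `i915_gem_reset_lists()` in the diff below.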
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c | 16 |
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 3 |
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 51 |
3 files changed, 42 insertions, 28 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4e83bb36888e..2184d29e7a9f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -395,21 +395,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/*
-	 * Clear request list
-	 */
-	i915_gem_retire_requests(dev);
-
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	i915_gem_reset_flushing_list(dev);
-
-	/* Move everything out of the GPU domains to ensure we do any
-	 * necessary invalidation upon reuse.
-	 */
-	i915_gem_reset_inactive_gpu_domains(dev);
+	i915_gem_reset_lists(dev);
 
 	/*
 	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12e9f853a5e9..5fec2ca619e8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1005,8 +1005,7 @@ int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 				  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_reset_flushing_list(struct drm_device *dev);
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev);
+void i915_gem_reset_lists(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 void i915_gem_flush_ring(struct drm_device *dev,
 			 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 734cc08c3fdb..0ce28c71facc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1682,27 +1682,60 @@ i915_get_gem_seqno(struct drm_device *dev,
 	return ring->get_gem_seqno(dev, ring);
 }
 
-void i915_gem_reset_flushing_list(struct drm_device *dev)
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
 
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		list_del(&request->list);
+		list_del(&request->client_list);
+		kfree(request);
+	}
+
+	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+		obj_priv = list_first_entry(&ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
 
 		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
 		i915_gem_object_move_to_inactive(&obj_priv->base);
 	}
 }
 
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev)
+void i915_gem_reset_lists(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
 
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+	/* Remove anything from the flushing lists. The GPU cache is likely
+	 * to be lost on reset along with the data, so simply move the
+	 * lost bo to the inactive list.
+	 */
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+					    struct drm_i915_gem_object,
+					    list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
 	list_for_each_entry(obj_priv,
 			    &dev_priv->mm.inactive_list,
 			    list)
@@ -1720,15 +1753,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
-	bool wedged;
 
 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
 		return;
 
 	seqno = i915_get_gem_seqno(dev, ring);
-	wedged = atomic_read(&dev_priv->mm.wedged);
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1736,7 +1766,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!wedged && !i915_seqno_passed(seqno, request->seqno))
+		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
 		trace_i915_gem_request_retire(dev, request->seqno);
@@ -1757,8 +1787,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 				      struct drm_i915_gem_object,
 				      list);
 
-		if (!wedged &&
-		    !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
 
 		obj = &obj_priv->base;