From 78501eac34f372bfbeb4e1d9de688c13efa916f6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 12:18:21 +0100 Subject: drm/i915/ringbuffer: Drop the redundant dev from the vfunc interface The ringbuffer keeps a pointer to the parent device, so we can use that instead of passing around the pointer on the stack. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 729fd0c91d7b..852a2d848bf4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -297,7 +297,7 @@ static void notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 seqno = ring->get_seqno(dev, ring); + u32 seqno = ring->get_seqno(ring); ring->irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); wake_up_all(&ring->irq_queue); @@ -586,7 +586,7 @@ static void i915_capture_error_state(struct drm_device *dev) DRM_DEBUG_DRIVER("generating error event\n"); error->seqno = - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); + dev_priv->render_ring.get_seqno(&dev_priv->render_ring); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); @@ -1117,7 +1117,7 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno) struct intel_ring_buffer *render_ring = &dev_priv->render_ring; if (dev_priv->trace_irq_seqno == 0) - render_ring->user_irq_get(dev, render_ring); + render_ring->user_irq_get(render_ring); dev_priv->trace_irq_seqno = seqno; } @@ -1141,10 +1141,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - render_ring->user_irq_get(dev, render_ring); + render_ring->user_irq_get(render_ring); DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); - render_ring->user_irq_put(dev, render_ring); + render_ring->user_irq_put(render_ring); if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", @@ -1338,7 +1338,7 @@ void i915_hangcheck_elapsed(unsigned long data) /* If all work is done then ACTHD clearly hasn't advanced. */ if (list_empty(&dev_priv->render_ring.request_list) || - i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring), + i915_seqno_passed(dev_priv->render_ring.get_seqno(&dev_priv->render_ring), i915_get_tail_request(dev)->seqno)) { bool missed_wakeup = false; -- cgit v1.2.3 From e1f99ce6cac3b6a95551642be5ddb5d9c46bea76 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 12:45:26 +0100 Subject: drm/i915: Propagate errors from writing to ringbuffer Preparing the ringbuffer for adding new commands can fail (a timeout whilst waiting for the GPU to catch up and free some space). So check for any potential error before overwriting HEAD with new commands, and propagate that error back to the user where possible. 
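The emit pattern this change converts every call site to is worth seeing in isolation. The following is a minimal sketch, not a hunk from the patch: it assumes only the intel_ring_begin()/intel_ring_emit()/intel_ring_advance() helpers and the MI_FLUSH/MI_NOOP commands that appear in the diffs below, and the function name is illustrative. Note too, per the previous patch, that the ring vfuncs now take just the ring; anything needing the device can reach it through ring->dev.

static int emit_flush_example(struct intel_ring_buffer *ring)
{
        int ret;

        /* Reserve space for two dwords; this may now fail, e.g. with
         * -EBUSY, if the GPU never drains enough of the ring. */
        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        /* Write commands only once the reservation has succeeded, so a
         * timeout can no longer scribble over unconsumed entries. */
        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);

        /* Publish the new TAIL to the hardware. */
        intel_ring_advance(ring);
        return 0;
}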
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 119 ++++++++++---------- drivers/gpu/drm/i915/i915_drv.h | 28 +---- drivers/gpu/drm/i915/i915_gem.c | 5 +- drivers/gpu/drm/i915/i915_irq.c | 13 ++- drivers/gpu/drm/i915/intel_display.c | 51 +++++---- drivers/gpu/drm/i915/intel_overlay.c | 30 ++++- drivers/gpu/drm/i915/intel_ringbuffer.c | 189 ++++++++++++++++++-------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +- 8 files changed, 244 insertions(+), 195 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8a171394a9cf..02daf4e5c8e6 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -263,7 +263,7 @@ static int i915_dma_init(struct drm_device *dev, void *data, * instruction detected will be given a size of zero, which is a * signal to abort the rest of the buffer. */ -static int do_validate_cmd(int cmd) +static int validate_cmd(int cmd) { switch (((cmd >> 29) & 0x7)) { case 0x0: @@ -321,40 +321,27 @@ static int do_validate_cmd(int cmd) return 0; } -static int validate_cmd(int cmd) -{ - int ret = do_validate_cmd(cmd); - -/* printk("validate_cmd( %x ): %d\n", cmd, ret); */ - - return ret; -} - static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; - int i; + int i, ret; if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) return -EINVAL; - BEGIN_LP_RING((dwords+1)&~1); - for (i = 0; i < dwords;) { - int cmd, sz; - - cmd = buffer[i]; - - if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) + int sz = validate_cmd(buffer[i]); + if (sz == 0 || i + sz > dwords) return -EINVAL; - - OUT_RING(cmd); - - while (++i, --sz) { - OUT_RING(buffer[i]); - } + i += sz; } + ret = BEGIN_LP_RING((dwords+1)&~1); + if (ret) + return ret; + + for (i = 0; i < dwords; i++) + OUT_RING(buffer[i]); if (dwords & 1) OUT_RING(0); @@ -368,7 +355,9 @@ i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_clip_rect box = boxes[i]; + int ret; if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", @@ -377,22 +366,27 @@ i915_emit_box(struct drm_device *dev, } if (INTEL_INFO(dev)->gen >= 4) { - BEGIN_LP_RING(4); + ret = BEGIN_LP_RING(4); + if (ret) + return ret; + OUT_RING(GFX_OP_DRAWRECT_INFO_I965); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); OUT_RING(DR4); - ADVANCE_LP_RING(); } else { - BEGIN_LP_RING(6); + ret = BEGIN_LP_RING(6); + if (ret) + return ret; + OUT_RING(GFX_OP_DRAWRECT_INFO); OUT_RING(DR1); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); OUT_RING(DR4); OUT_RING(0); - ADVANCE_LP_RING(); } + ADVANCE_LP_RING(); return 0; } @@ -412,12 +406,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev) if (master_priv->sarea_priv) master_priv->sarea_priv->last_enqueue = dev_priv->counter; - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(0); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(0); + ADVANCE_LP_RING(); + } } static int i915_dispatch_cmdbuffer(struct 
drm_device * dev, @@ -458,8 +453,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects) { + struct drm_i915_private *dev_priv = dev->dev_private; int nbox = batch->num_cliprects; - int i = 0, count; + int i, count, ret; if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment"); @@ -469,17 +465,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, i915_kernel_lost_context(dev); count = nbox ? nbox : 1; - for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, cliprects, i, - batch->DR1, batch->DR4); + ret = i915_emit_box(dev, cliprects, i, + batch->DR1, batch->DR4); if (ret) return ret; } if (!IS_I830(dev) && !IS_845G(dev)) { - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) + return ret; + if (INTEL_INFO(dev)->gen >= 4) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); @@ -487,26 +485,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); OUT_RING(batch->start | MI_BATCH_NON_SECURE); } - ADVANCE_LP_RING(); } else { - BEGIN_LP_RING(4); + ret = BEGIN_LP_RING(4); + if (ret) + return ret; + OUT_RING(MI_BATCH_BUFFER); OUT_RING(batch->start | MI_BATCH_NON_SECURE); OUT_RING(batch->start + batch->used - 4); OUT_RING(0); - ADVANCE_LP_RING(); } + ADVANCE_LP_RING(); } if (IS_G4X(dev) || IS_GEN5(dev)) { - BEGIN_LP_RING(2); - OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(2) == 0) { + OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + } } - i915_emit_breadcrumb(dev); + i915_emit_breadcrumb(dev); return 0; } @@ -515,6 +516,7 @@ static int i915_dispatch_flip(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + int ret; if (!master_priv->sarea_priv) return -EINVAL; @@ -526,12 +528,13 @@ static int i915_dispatch_flip(struct drm_device * dev) i915_kernel_lost_context(dev); - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(10); + if (ret) + return ret; + OUT_RING(MI_FLUSH | MI_READ_FLUSH); OUT_RING(0); - ADVANCE_LP_RING(); - BEGIN_LP_RING(6); OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); if (dev_priv->current_page == 0) { @@ -542,21 +545,21 @@ static int i915_dispatch_flip(struct drm_device * dev) dev_priv->current_page = 0; } OUT_RING(0); - ADVANCE_LP_RING(); - BEGIN_LP_RING(2); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); OUT_RING(0); + ADVANCE_LP_RING(); master_priv->sarea_priv->last_enqueue = dev_priv->counter++; - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(0); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(0); + ADVANCE_LP_RING(); + } master_priv->sarea_priv->pf_current_page = dev_priv->current_page; return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6fb225f6b2c8..c241468c632e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1216,30 +1216,14 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | 
\ I915_DEBUG_WRITE)) -#define I915_VERBOSE 0 +#define BEGIN_LP_RING(n) \ + intel_ring_begin(&dev_priv->render_ring, (n)) -#define BEGIN_LP_RING(n) do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ - intel_ring_begin(&dev_priv__->render_ring, (n)); \ -} while (0) - - -#define OUT_RING(x) do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ - intel_ring_emit(&dev_priv__->render_ring, x); \ -} while (0) +#define OUT_RING(x) \ + intel_ring_emit(&dev_priv->render_ring, x) -#define ADVANCE_LP_RING() do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG("ADVANCE_LP_RING %x\n", \ - dev_priv__->render_ring.tail); \ - intel_ring_advance(&dev_priv__->render_ring); \ -} while(0) +#define ADVANCE_LP_RING() \ + intel_ring_advance(&dev_priv->render_ring) /** * Reads a dword out of the status page, which is written to from the command diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 97bf7c87d857..00e901483ba5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3826,7 +3826,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + goto err; + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 852a2d848bf4..8acdd6d857d3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1101,12 +1101,13 @@ static int i915_emit_irq(struct drm_device * dev) if (master_priv->sarea_priv) master_priv->sarea_priv->last_enqueue = dev_priv->counter; - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(MI_USER_INTERRUPT); + ADVANCE_LP_RING(); + } return dev_priv->counter; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 990f065374b2..eb4c725e3069 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5090,22 +5090,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (ret) goto cleanup_objs; - /* Block clients from rendering to the new back buffer until - * the flip occurs and the object is no longer visible. - */ - atomic_add(1 << intel_crtc->plane, - &to_intel_bo(work->old_fb_obj)->pending_flip); - - work->pending_flip_obj = obj; - obj_priv = to_intel_bo(obj); - if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; /* Can't queue multiple flips, so wait for the previous * one to finish before executing the next. 
*/ - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) + goto cleanup_objs; + if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else @@ -5115,13 +5109,25 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ADVANCE_LP_RING(); } + work->pending_flip_obj = obj; + obj_priv = to_intel_bo(obj); + work->enable_stall_check = true; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; - BEGIN_LP_RING(4); - switch(INTEL_INFO(dev)->gen) { + ret = BEGIN_LP_RING(4); + if (ret) + goto cleanup_objs; + + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. + */ + atomic_add(1 << intel_crtc->plane, + &to_intel_bo(work->old_fb_obj)->pending_flip); + + switch (INTEL_INFO(dev)->gen) { case 2: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); @@ -5850,16 +5856,17 @@ void intel_init_clock_gating(struct drm_device *dev) struct drm_i915_gem_object *obj_priv; obj_priv = to_intel_bo(dev_priv->renderctx); if (obj_priv) { - BEGIN_LP_RING(4); - OUT_RING(MI_SET_CONTEXT); - OUT_RING(obj_priv->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_SET_CONTEXT); + OUT_RING(obj_priv->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + OUT_RING(MI_NOOP); + OUT_RING(MI_FLUSH); + ADVANCE_LP_RING(); + } } } else DRM_DEBUG_KMS("Failed to allocate render context." diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index afb96d25219a..78fa6a249964 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -289,6 +289,7 @@ i830_deactivate_pipe_a(struct drm_device *dev) static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_request *request; int pipe_a_quirk = 0; int ret; @@ -308,7 +309,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) goto out; } - BEGIN_LP_RING(4); + ret = BEGIN_LP_RING(4); + if (ret) { + kfree(request); + goto out; + } + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); OUT_RING(overlay->flip_addr | OFC_UPDATE); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); @@ -332,6 +338,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, struct drm_i915_gem_request *request; u32 flip_addr = overlay->flip_addr; u32 tmp; + int ret; BUG_ON(!overlay->active); @@ -347,7 +354,11 @@ static int intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) { + kfree(request); + return ret; + } OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); ADVANCE_LP_RING(); @@ -389,8 +400,10 @@ static int intel_overlay_off(struct intel_overlay *overlay, bool interruptible) { struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_addr = overlay->flip_addr; struct drm_i915_gem_request *request; + int ret; BUG_ON(!overlay->active); @@ -404,7 +417,11 @@ static int intel_overlay_off(struct intel_overlay *overlay, * of the hw. 
Do it in both cases */ flip_addr |= OFC_UPDATE; - BEGIN_LP_RING(6); + ret = BEGIN_LP_RING(6); + if (ret) { + kfree(request); + return ret; + } /* wait for overlay to go idle */ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); @@ -467,7 +484,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) if (request == NULL) return -ENOMEM; - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) { + kfree(request); + return ret; + } + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); OUT_RING(MI_NOOP); ADVANCE_LP_RING(); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d6eba661105f..6fe42c1f4ea9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -112,10 +112,11 @@ render_ring_flush(struct intel_ring_buffer *ring, #if WATCH_EXEC DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); #endif - intel_ring_begin(ring, 2); - intel_ring_emit(ring, cmd); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } } @@ -244,16 +245,17 @@ render_ring_add_request(struct intel_ring_buffer *ring, seqno = i915_gem_get_seqno(dev); if (IS_GEN6(dev)) { - intel_ring_begin(ring, 6); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); - intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 6) == 0) { + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); + intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } else if (HAS_PIPE_CONTROL(dev)) { u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; @@ -262,38 +264,40 @@ render_ring_add_request(struct intel_ring_buffer *ring, * PIPE_NOTIFY buffers out to memory before requesting * an interrupt. 
*/ - intel_ring_begin(ring, 32); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 32) == 0) { + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } else { - intel_ring_begin(ring, 4); - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); + if (intel_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + } } return seqno; } @@ -359,10 +363,11 @@ bsd_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { - intel_ring_begin(ring, 2); - intel_ring_emit(ring, MI_FLUSH); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } static u32 @@ -373,12 +378,13 @@ ring_add_request(struct intel_ring_buffer *ring, seqno = i915_gem_get_seqno(ring->dev); - intel_ring_begin(ring, 4); - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 
MI_USER_INTERRUPT); + intel_ring_advance(ring); + } DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); @@ -409,10 +415,14 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint64_t exec_offset) { uint32_t exec_start; + int ret; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6) | @@ -432,8 +442,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; int nbox = exec->num_cliprects; - int i = 0, count; uint32_t exec_start, exec_len; + int i, count, ret; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; @@ -441,23 +451,28 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); count = nbox ? nbox : 1; - for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, cliprects, i, - exec->DR1, exec->DR4); + ret = i915_emit_box(dev, cliprects, i, + exec->DR1, exec->DR4); if (ret) return ret; } if (IS_I830(dev) || IS_845G(dev)) { - intel_ring_begin(ring, 4); + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER); intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE); intel_ring_emit(ring, exec_start + exec_len - 4); intel_ring_emit(ring, 0); } else { - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + if (INTEL_INFO(dev)->gen >= 4) { intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6) @@ -474,12 +489,13 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, } if (IS_G4X(dev) || IS_GEN5(dev)) { - intel_ring_begin(ring, 2); - intel_ring_emit(ring, MI_FLUSH | - MI_NO_WRITE_FLUSH | - MI_INVALIDATE_ISP ); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, MI_FLUSH | + MI_NO_WRITE_FLUSH | + MI_INVALIDATE_ISP ); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } /* XXX breadcrumb */ @@ -693,18 +709,26 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) return -EBUSY; } -void intel_ring_begin(struct intel_ring_buffer *ring, - int num_dwords) +int intel_ring_begin(struct intel_ring_buffer *ring, + int num_dwords) { int n = 4*num_dwords; + int ret; - if (unlikely(ring->tail + n > ring->size)) - intel_wrap_ring_buffer(ring); + if (unlikely(ring->tail + n > ring->size)) { + ret = intel_wrap_ring_buffer(ring); + if (unlikely(ret)) + return ret; + } - if (unlikely(ring->space < n)) - intel_wait_ring_buffer(ring, n); + if (unlikely(ring->space < n)) { + ret = intel_wait_ring_buffer(ring, n); + if (unlikely(ret)) + return ret; + } ring->space -= n; + return 0; } void intel_ring_advance(struct intel_ring_buffer *ring) @@ -772,12 +796,13 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { - intel_ring_begin(ring, 4); - intel_ring_emit(ring, MI_FLUSH_DW); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_FLUSH_DW); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } static int @@ -787,10 +812,14 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 
uint64_t exec_offset) { uint32_t exec_start; + int ret; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ intel_ring_emit(ring, exec_start); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ba4a393e6d16..35ece2b87b02 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -100,8 +100,8 @@ intel_read_status_page(struct intel_ring_buffer *ring, } void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); -int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); -void intel_ring_begin(struct intel_ring_buffer *ring, int n); +int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); +int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); static inline void intel_ring_emit(struct intel_ring_buffer *ring, u32 data) -- cgit v1.2.3 From 893eead092f14e42cac054a394a86e3c6e016b68 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 14:44:35 +0100 Subject: drm/i915: Fix hangcheck to handle multiple rings Currently, we believe the GPU is idle if just the RENDER ring is idle. This is obviously wrong if we are using only the BLT or the BSD rings, and so it masks genuine hangs. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 74 +++++++++++++++++++---------------------- 1 file changed, 35 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8acdd6d857d3..23b28528c642 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1307,12 +1307,29 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } -static struct drm_i915_gem_request * -i915_get_tail_request(struct drm_device *dev) +static u32 +ring_last_seqno(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - return list_entry(dev_priv->render_ring.request_list.prev, - struct drm_i915_gem_request, list); + return list_entry(ring->request_list.prev, + struct drm_i915_gem_request, list)->seqno; +} + +static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) +{ + if (list_empty(&ring->request_list) || + i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { + /* Issue a wake-up to catch stuck h/w. */ + if (ring->waiting_gem_seqno && waitqueue_active(&ring->irq_queue)) { + DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", + ring->name, + ring->waiting_gem_seqno, + ring->get_seqno(ring)); + wake_up_all(&ring->irq_queue); + *err = true; + } + return true; + } + return false; } /** @@ -1326,6 +1343,17 @@ void i915_hangcheck_elapsed(unsigned long data) struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; uint32_t acthd, instdone, instdone1; + bool err = false; + + /* If all work is done then ACTHD clearly hasn't advanced. 
*/ + if (i915_hangcheck_ring_idle(&dev_priv->render_ring, &err) && + i915_hangcheck_ring_idle(&dev_priv->bsd_ring, &err) && + i915_hangcheck_ring_idle(&dev_priv->blt_ring, &err)) { + dev_priv->hangcheck_count = 0; + if (err) + goto repeat; + return; + } if (INTEL_INFO(dev)->gen < 4) { acthd = I915_READ(ACTHD); @@ -1337,38 +1365,6 @@ void i915_hangcheck_elapsed(unsigned long data) instdone1 = I915_READ(INSTDONE1); } - /* If all work is done then ACTHD clearly hasn't advanced. */ - if (list_empty(&dev_priv->render_ring.request_list) || - i915_seqno_passed(dev_priv->render_ring.get_seqno(&dev_priv->render_ring), - i915_get_tail_request(dev)->seqno)) { - bool missed_wakeup = false; - - dev_priv->hangcheck_count = 0; - - /* Issue a wake-up to catch stuck h/w. */ - if (dev_priv->render_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->render_ring.irq_queue)) { - wake_up_all(&dev_priv->render_ring.irq_queue); - missed_wakeup = true; - } - - if (dev_priv->bsd_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { - wake_up_all(&dev_priv->bsd_ring.irq_queue); - missed_wakeup = true; - } - - if (dev_priv->blt_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->blt_ring.irq_queue)) { - wake_up_all(&dev_priv->blt_ring.irq_queue); - missed_wakeup = true; - } - - if (missed_wakeup) - DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); - return; - } - if (dev_priv->last_acthd == acthd && dev_priv->last_instdone == instdone && dev_priv->last_instdone1 == instdone1) { @@ -1385,7 +1381,7 @@ void i915_hangcheck_elapsed(unsigned long data) if (tmp & RING_WAIT) { I915_WRITE(PRB0_CTL, tmp); POSTING_READ(PRB0_CTL); - goto out; + goto repeat; } } @@ -1400,7 +1396,7 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_instdone1 = instdone1; } -out: +repeat: /* Reset timer case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); -- cgit v1.2.3 From b2223497b44a4701d1be873d1e9453d7f720043b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 15:27:33 +0100 Subject: drm/i915: Remove the confusing global waiting/irq seqno Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 44 ++++++++++++++------------------- drivers/gpu/drm/i915/i915_drv.h | 10 -------- drivers/gpu/drm/i915/i915_gem.c | 4 +-- drivers/gpu/drm/i915/i915_irq.c | 6 ++--- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +-- 5 files changed, 26 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c5aa6bee3abb..beb3de7921b5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -319,6 +319,19 @@ static int i915_gem_request_info(struct seq_file *m, void *data) return 0; } +static void i915_ring_seqno_info(struct seq_file *m, + struct intel_ring_buffer *ring) +{ + if (ring->get_seqno) { + seq_printf(m, "Current sequence (%s): %d\n", + ring->name, ring->get_seqno(ring)); + seq_printf(m, "Waiter sequence (%s): %d\n", + ring->name, ring->waiting_seqno); + seq_printf(m, "IRQ sequence (%s): %d\n", + ring->name, ring->irq_seqno); + } +} + static int i915_gem_seqno_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -330,15 +343,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) if (ret) return ret; - if (dev_priv->render_ring.status_page.page_addr != NULL) { - 
seq_printf(m, "Current sequence: %d\n", - dev_priv->render_ring.get_seqno(&dev_priv->render_ring)); - } else { - seq_printf(m, "Current sequence: hws uninitialized\n"); - } - seq_printf(m, "Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + i915_ring_seqno_info(m, &dev_priv->render_ring); + i915_ring_seqno_info(m, &dev_priv->bsd_ring); + i915_ring_seqno_info(m, &dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); @@ -390,22 +397,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - if (dev_priv->render_ring.get_seqno) { - seq_printf(m, "Current sequence (render): %d\n", - dev_priv->render_ring.get_seqno(&dev_priv->render_ring)); - } - if (dev_priv->bsd_ring.get_seqno) { - seq_printf(m, "Current sequence (BSD): %d\n", - dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring)); - } - if (dev_priv->blt_ring.get_seqno) { - seq_printf(m, "Current sequence (BLT): %d\n", - dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring)); - } - seq_printf(m, "Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - seq_printf(m, "IRQ sequence: %d\n", - dev_priv->mm.irq_gem_seqno); + i915_ring_seqno_info(m, &dev_priv->render_ring); + i915_ring_seqno_info(m, &dev_priv->bsd_ring); + i915_ring_seqno_info(m, &dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c241468c632e..2af8e1604b44 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -608,16 +608,6 @@ typedef struct drm_i915_private { */ struct delayed_work retire_work; - /** - * Waiting sequence number, if any - */ - uint32_t waiting_gem_seqno; - - /** - * Last seq seen at irq time - */ - uint32_t irq_gem_seqno; - /** * Flag if the X Server, and thus DRM, is not currently in * control of the device. diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 580244c1e793..74f5525d156f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1984,7 +1984,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); - ring->waiting_gem_seqno = seqno; + ring->waiting_seqno = seqno; ring->user_irq_get(ring); if (interruptible) ret = wait_event_interruptible(ring->irq_queue, @@ -1996,7 +1996,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, || atomic_read(&dev_priv->mm.wedged)); ring->user_irq_put(ring); - ring->waiting_gem_seqno = 0; + ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 23b28528c642..29cbcb33b92c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -298,7 +298,7 @@ static void notify_ring(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; u32 seqno = ring->get_seqno(ring); - ring->irq_gem_seqno = seqno; + ring->irq_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); wake_up_all(&ring->irq_queue); dev_priv->hangcheck_count = 0; @@ -1319,10 +1319,10 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) if (list_empty(&ring->request_list) || i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { /* Issue a wake-up to catch stuck h/w. 
*/ - if (ring->waiting_gem_seqno && waitqueue_active(&ring->irq_queue)) { + if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) { DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", ring->name, - ring->waiting_gem_seqno, + ring->waiting_seqno, ring->get_seqno(ring)); wake_up_all(&ring->irq_queue); *err = true; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 84f6919de8e0..7ad9e94220b4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -35,8 +35,8 @@ struct intel_ring_buffer { int size; struct intel_hw_status_page status_page; - u32 irq_gem_seqno; /* last seq seem at irq time */ - u32 waiting_gem_seqno; + u32 irq_seqno; /* last seq seem at irq time */ + u32 waiting_seqno; int user_irq_refcount; void (*user_irq_get)(struct intel_ring_buffer *ring); void (*user_irq_put)(struct intel_ring_buffer *ring); -- cgit v1.2.3 From f406839f094ef24bb201c9574fdb9ce8e799a975 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 20:36:41 +0100 Subject: drm/i915: Capture ERROR register on Sandybridge hangs This holds error state from the main graphics arbiter mainly involving the DMA engine and address translation. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 18 +++++++++++------- drivers/gpu/drm/i915/i915_reg.h | 2 ++ 4 files changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2e5961139326..4fc1e05b769f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -615,6 +615,9 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + if (INTEL_INFO(dev)->gen >= 6) { + seq_printf(m, "ERROR: 0x%08x\n", error->error); + } seq_printf(m, "EIR: 0x%08x\n", error->eir); seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cdae5d189165..895f9173de58 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -142,6 +142,7 @@ struct sdvo_device_mapping { struct drm_i915_error_state { u32 eir; u32 pgtbl_er; + u32 error; /* gen6+ */ u32 pipeastat; u32 pipebstat; u32 ipeir; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 29cbcb33b92c..2a29497a4694 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -592,13 +592,11 @@ static void i915_capture_error_state(struct drm_device *dev) error->pipeastat = I915_READ(PIPEASTAT); error->pipebstat = I915_READ(PIPEBSTAT); error->instpm = I915_READ(INSTPM); - if (INTEL_INFO(dev)->gen < 4) { - error->ipeir = I915_READ(IPEIR); - error->ipehr = I915_READ(IPEHR); - error->instdone = I915_READ(INSTDONE); - error->acthd = I915_READ(ACTHD); - error->bbaddr = 0; - } else { + error->error = 0; + if (INTEL_INFO(dev)->gen >= 6) { + error->error = I915_READ(ERROR_GEN6); + } + if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); error->ipehr = I915_READ(IPEHR_I965); error->instdone = I915_READ(INSTDONE_I965); @@ -606,6 +604,12 @@ static void i915_capture_error_state(struct drm_device *dev) 
error->instdone1 = I915_READ(INSTDONE1); error->acthd = I915_READ(ACTHD_I965); error->bbaddr = I915_READ64(BB_ADDR); + } else { + error->ipeir = I915_READ(IPEIR); + error->ipehr = I915_READ(IPEHR); + error->instdone = I915_READ(INSTDONE); + error->acthd = I915_READ(ACTHD); + error->bbaddr = 0; } bbaddr = i915_ringbuffer_last_batch(dev); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 25ed911a3112..fc161196da21 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -306,6 +306,8 @@ #define NOPID 0x02094 #define HWSTAM 0x02098 +#define ERROR_GEN6 0x040a0 + #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 11) -- cgit v1.2.3 From 1d8f38f4e7146d22f7fbc94eef0508bd75463f54 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 19:00:51 +0100 Subject: drm/i915: Record BLT engine error state Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 19 +++++++++++++------ drivers/gpu/drm/i915/i915_drv.h | 7 ++++++- drivers/gpu/drm/i915/i915_irq.c | 7 +++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 4 files changed, 30 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index ba2af4e046ed..70eec5aceabe 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -616,21 +616,28 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + seq_printf(m, "EIR: 0x%08x\n", error->eir); + seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "ERROR: 0x%08x\n", error->error); + seq_printf(m, "Blitter command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); + seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); + seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); + seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); + seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); } - seq_printf(m, "EIR: 0x%08x\n", error->eir); - seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); - seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, "Render command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); - seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); if (INTEL_INFO(dev)->gen >= 4) { - seq_printf(m, " INSTPS: 0x%08x\n", error->instps); seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); + seq_printf(m, " INSTPS: 0x%08x\n", error->instps); } - seq_printf(m, "seqno: 0x%08x\n", error->seqno); + seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, " seqno: 0x%08x\n", error->seqno); if (error->active_bo_count) { seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7aa7f8abf892..05bff9e6a9cb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -142,13 +142,18 @@ struct sdvo_device_mapping { struct drm_i915_error_state { u32 eir; u32 pgtbl_er; - u32 error; /* gen6+ */ u32 pipeastat; u32 pipebstat; u32 ipeir; u32 ipehr; u32 instdone; u32 acthd; + u32 error; /* gen6+ */ + u32 bcs_acthd; /* gen6+ blt engine */ + u32 bcs_ipehr; + 
u32 bcs_ipeir; + u32 bcs_instdone; + u32 bcs_seqno; u32 instpm; u32 instps; u32 instdone1; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2a29497a4694..4ff39dee7f1c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -595,6 +595,13 @@ static void i915_capture_error_state(struct drm_device *dev) error->error = 0; if (INTEL_INFO(dev)->gen >= 6) { error->error = I915_READ(ERROR_GEN6); + error->bcs_acthd = I915_READ(BCS_ACTHD); + error->bcs_ipehr = I915_READ(BCS_IPEHR); + error->bcs_ipeir = I915_READ(BCS_IPEIR); + error->bcs_instdone = I915_READ(BCS_INSTDONE); + error->bcs_seqno = 0; + if (dev_priv->blt_ring.get_seqno) + error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring); } if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fc161196da21..b173e5b77f96 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -305,6 +305,10 @@ #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 +#define BCS_INSTDONE 0x2206C +#define BCS_IPEIR 0x22064 +#define BCS_IPEHR 0x22068 +#define BCS_ACTHD 0x22074 #define ERROR_GEN6 0x040a0 -- cgit v1.2.3 From add354ddf62beac55ca3ba64835dd703a0649867 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 19:00:51 +0100 Subject: drm/i915: Record BSD engine error state Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 5 +++++ drivers/gpu/drm/i915/i915_irq.c | 9 +++++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 4 files changed, 24 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 58cf60d89f68..56f7ced16f1a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -634,6 +634,12 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); + seq_printf(m, "Video (BSD) command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); + seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); + seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); + seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); + seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); } seq_printf(m, "Render command stream:\n"); seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 05bff9e6a9cb..ec582b6d2113 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -154,6 +154,11 @@ struct drm_i915_error_state { u32 bcs_ipeir; u32 bcs_instdone; u32 bcs_seqno; + u32 vcs_acthd; /* gen6+ bsd engine */ + u32 vcs_ipehr; + u32 vcs_ipeir; + u32 vcs_instdone; + u32 vcs_seqno; u32 instpm; u32 instps; u32 instdone1; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4ff39dee7f1c..90c071d37748 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -595,6 +595,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->error = 0; if (INTEL_INFO(dev)->gen >= 6) { error->error = I915_READ(ERROR_GEN6); + error->bcs_acthd = I915_READ(BCS_ACTHD); error->bcs_ipehr = I915_READ(BCS_IPEHR); error->bcs_ipeir = 
I915_READ(BCS_IPEIR); @@ -602,6 +603,14 @@ static void i915_capture_error_state(struct drm_device *dev) error->bcs_seqno = 0; if (dev_priv->blt_ring.get_seqno) error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring); + + error->vcs_acthd = I915_READ(VCS_ACTHD); + error->vcs_ipehr = I915_READ(VCS_IPEHR); + error->vcs_ipeir = I915_READ(VCS_IPEIR); + error->vcs_instdone = I915_READ(VCS_INSTDONE); + error->vcs_seqno = 0; + if (dev_priv->bsd_ring.get_seqno) + error->vcs_seqno = dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring); } if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b173e5b77f96..c79d4ba4fb12 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -305,6 +305,10 @@ #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 +#define VCS_INSTDONE 0x1206C +#define VCS_IPEIR 0x12064 +#define VCS_IPEHR 0x12068 +#define VCS_ACTHD 0x12074 #define BCS_INSTDONE 0x2206C #define BCS_IPEIR 0x22064 #define BCS_IPEHR 0x22068 -- cgit v1.2.3 From e5c652603680404683fd1f262b511340545179a2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 1 Nov 2010 11:35:28 +0000 Subject: drm/i915/debugfs: Report ring in error state Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 62 ++++++++++++++++++++----------------- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 1 + 3 files changed, 36 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 56f7ced16f1a..9cb6061bf9d5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,6 +32,7 @@ #include "drmP.h" #include "drm.h" #include "intel_drv.h" +#include "intel_ringbuffer.h" #include "i915_drm.h" #include "i915_drv.h" @@ -48,12 +49,6 @@ enum { DEFERRED_FREE_LIST, }; -enum { - RENDER_RING, - BSD_RING, - BLT_RING, -}; - static const char *yesno(int v) { return v ? 
"yes" : "no"; @@ -450,9 +445,9 @@ static int i915_hws_info(struct seq_file *m, void *data) int i; switch ((uintptr_t)node->info_ent->data) { - case RENDER_RING: ring = &dev_priv->render_ring; break; - case BSD_RING: ring = &dev_priv->bsd_ring; break; - case BLT_RING: ring = &dev_priv->blt_ring; break; + case RING_RENDER: ring = &dev_priv->render_ring; break; + case RING_BSD: ring = &dev_priv->bsd_ring; break; + case RING_BLT: ring = &dev_priv->blt_ring; break; default: return -EINVAL; } @@ -520,9 +515,9 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) int ret; switch ((uintptr_t)node->info_ent->data) { - case RENDER_RING: ring = &dev_priv->render_ring; break; - case BSD_RING: ring = &dev_priv->bsd_ring; break; - case BLT_RING: ring = &dev_priv->blt_ring; break; + case RING_RENDER: ring = &dev_priv->render_ring; break; + case RING_BSD: ring = &dev_priv->bsd_ring; break; + case RING_BLT: ring = &dev_priv->blt_ring; break; default: return -EINVAL; } @@ -554,9 +549,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) struct intel_ring_buffer *ring; switch ((uintptr_t)node->info_ent->data) { - case RENDER_RING: ring = &dev_priv->render_ring; break; - case BSD_RING: ring = &dev_priv->bsd_ring; break; - case BLT_RING: ring = &dev_priv->blt_ring; break; + case RING_RENDER: ring = &dev_priv->render_ring; break; + case RING_BSD: ring = &dev_priv->bsd_ring; break; + case RING_BLT: ring = &dev_priv->blt_ring; break; default: return -EINVAL; } @@ -574,6 +569,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) return 0; } +static const char *ring_str(int ring) +{ + switch (ring) { + case RING_RENDER: return "render"; + case RING_BSD: return "bsd"; + case RING_BLT: return "blt"; + default: return ""; + } +} + static const char *pin_flag(int pinned) { if (pinned > 0) @@ -630,14 +635,14 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "ERROR: 0x%08x\n", error->error); seq_printf(m, "Blitter command stream:\n"); seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); - seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); seq_printf(m, "Video (BSD) command stream:\n"); seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); - seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); } @@ -657,7 +662,7 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); for (i = 0; i < error->active_bo_count; i++) { - seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", + seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s %s", error->active_bo[i].gtt_offset, error->active_bo[i].size, error->active_bo[i].read_domains, @@ -666,7 +671,8 @@ static int i915_error_state(struct seq_file *m, void *unused) pin_flag(error->active_bo[i].pinned), tiling_flag(error->active_bo[i].tiling), dirty_flag(error->active_bo[i].dirty), - purgeable_flag(error->active_bo[i].purgeable)); + purgeable_flag(error->active_bo[i].purgeable), + ring_str(error->active_bo[i].ring)); if (error->active_bo[i].name) seq_printf(m, " (name: %d)", error->active_bo[i].name); @@ -1101,15 +1107,15 @@ 
static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_hws", i915_hws_info, 0, (void *)RENDER_RING}, - {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BLT_RING}, - {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)BSD_RING}, - {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RENDER_RING}, - {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RENDER_RING}, - {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BSD_RING}, - {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BSD_RING}, - {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BLT_RING}, - {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BLT_RING}, + {"i915_gem_hws", i915_hws_info, 0, (void *)RING_RENDER}, + {"i915_gem_hws_blt", i915_hws_info, 0, (void *)RING_BLT}, + {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)RING_BSD}, + {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_RENDER}, + {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_RENDER}, + {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BSD}, + {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BSD}, + {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BLT}, + {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BLT}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, {"i915_error_state", i915_error_state, 0}, {"i915_rstdby_delays", i915_rstdby_delays, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 54e5b2fa5fd4..3f8786049cb6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -182,6 +182,7 @@ struct drm_i915_error_state { u32 tiling:2; u32 dirty:1; u32 purgeable:1; + u32 ring:4; } *active_bo; u32 active_bo_count; struct intel_overlay_error_state *overlay; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 90c071d37748..3ec631f1129d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -728,6 +728,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->active_bo[i].tiling = obj_priv->tiling_mode; error->active_bo[i].dirty = obj_priv->dirty; error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; + error->active_bo[i].ring = obj_priv->ring->id; if (++i == count) break; -- cgit v1.2.3 From 527f9e907c39f7e88abb57eaa8bccb43c8706a3d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 11 Nov 2010 01:16:58 +0000 Subject: drm/i915: Remove the global irq wait queue ... as it has been replaced by per-ring waiters. 
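The per-ring waiters in question follow the shape of i915_do_wait_request() in the seqno patch above. A condensed sketch, with the interruptibility and wedged-GPU handling omitted, using only the ring fields introduced earlier in this series; the function name is illustrative:

static int wait_ring_seqno_example(struct intel_ring_buffer *ring, u32 seqno)
{
        int ret;

        /* Record what we are waiting for so hangcheck can report it. */
        ring->waiting_seqno = seqno;

        /* Take an irq reference on this engine alone, sleep on this
         * engine's queue, then drop the reference again. */
        ring->user_irq_get(ring);
        ret = wait_event_interruptible(ring->irq_queue,
                        i915_seqno_passed(ring->get_seqno(ring), seqno));
        ring->user_irq_put(ring);

        ring->waiting_seqno = 0;
        return ret;
}

Since each engine owns its own irq_queue, a wakeup on the blitter no longer disturbs sleepers on the render ring, and the global queue has nothing left to do.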
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 8 +------- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_irq.c | 2 +- 3 files changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9cb6061bf9d5..4c8fae9baaad 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1022,7 +1022,6 @@ i915_wedged_write(struct file *filp, loff_t *ppos) { struct drm_device *dev = filp->private_data; - drm_i915_private_t *dev_priv = dev->dev_private; char buf[20]; int val = 1; @@ -1038,12 +1037,7 @@ i915_wedged_write(struct file *filp, } DRM_INFO("Manually setting wedged to %d\n", val); - - atomic_set(&dev_priv->mm.wedged, val); - if (val) { - wake_up_all(&dev_priv->irq_queue); - queue_work(dev_priv->wq, &dev_priv->error_work); - } + i915_handle_error(dev, val); return cnt; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ff7593f70f0f..30d7a7bc6f2e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -289,7 +289,6 @@ typedef struct drm_i915_private { int current_page; int page_flipping; - wait_queue_head_t irq_queue; atomic_t irq_received; /** Protects user_irq_refcount and irq_mask_reg */ spinlock_t user_irq_lock; @@ -915,6 +914,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); /* i915_irq.c */ void i915_hangcheck_elapsed(unsigned long data); +void i915_handle_error(struct drm_device *dev, bool wedged); extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3ec631f1129d..4a0664ea49b9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -891,7 +891,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -static void i915_handle_error(struct drm_device *dev, bool wedged) +void i915_handle_error(struct drm_device *dev, bool wedged) { struct drm_i915_private *dev_priv = dev->dev_private; -- cgit v1.2.3 From 8168bd48bb863c00747497aadf13884b2d69d287 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 11 Nov 2010 17:54:52 +0000 Subject: drm/i915: Remove the definitions for Primary Ring Buffer We only ever used the PRB0, neglecting the secondary ring buffers, and now with the advent of multiple engines with separate ring buffers we need to excise the anachronisms from our code (and be explicit about which ring we mean where). This is doubly important in light of the FORCEWAKE required to read ring buffer registers on SandyBridge. 
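The replacement accessors are keyed off each engine's register base (RENDER_RING_BASE, BSD_RING_BASE and friends in the i915_reg.h hunk below). A sketch of the idea, assuming the ring structure carries its mmio_base; the offsets fall out of the old definitions, since PRB0_TAIL/HEAD/CTL sat at 0x2030/0x2034/0x203c against the render base of 0x2000:

#define RING_TAIL(base)         ((base) + 0x30)
#define RING_HEAD(base)         ((base) + 0x34)
#define RING_CTL(base)          ((base) + 0x3c)

#define I915_READ_TAIL(ring)    I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_READ_HEAD(ring)    I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_READ_CTL(ring)     I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) \
        I915_WRITE(RING_CTL((ring)->mmio_base), (val))

With this, I915_READ_CTL(&dev_priv->render_ring) touches 0x0203c exactly as the old PRB0_CTL did, while the same macro against the BSD or BLT ring addresses that engine's own control register.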
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- drivers/gpu/drm/i915/i915_irq.c | 28 ++++++++++++++-------------- drivers/gpu/drm/i915/i915_reg.h | 10 ++++++---- drivers/gpu/drm/i915/intel_display.c | 10 +++++----- 4 files changed, 27 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 307bad0fcef7..4cd04917868d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -106,8 +106,8 @@ void i915_kernel_lost_context(struct drm_device * dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; + ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4a0664ea49b9..21034527d3a4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -520,30 +520,30 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring) } static u32 -i915_ringbuffer_last_batch(struct drm_device *dev) +i915_ringbuffer_last_batch(struct drm_device *dev, + struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; u32 head, bbaddr; - u32 *ring; + u32 *val; /* Locate the current position in the ringbuffer and walk back * to find the most recently dispatched batch buffer. */ bbaddr = 0; - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring = (u32 *)(dev_priv->render_ring.virtual_start + head); + head = I915_READ_HEAD(ring) & HEAD_ADDR; + val = (u32 *)(ring->virtual_start + head); - while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); if (bbaddr) break; } if (bbaddr == 0) { - ring = (u32 *)(dev_priv->render_ring.virtual_start - + dev_priv->render_ring.size); - while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); + val = (u32 *)(ring->virtual_start + ring->size); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); if (bbaddr) break; } @@ -628,7 +628,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->bbaddr = 0; } - bbaddr = i915_ringbuffer_last_batch(dev); + bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring); /* Grab the current batchbuffer, most likely to have crashed. */ batchbuffer[0] = NULL; @@ -1398,10 +1398,10 @@ void i915_hangcheck_elapsed(unsigned long data) * and break the hang. This should work on * all but the second generation chipsets. 
*/ - u32 tmp = I915_READ(PRB0_CTL); + struct intel_ring_buffer *ring = &dev_priv->render_ring; + u32 tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) { - I915_WRITE(PRB0_CTL, tmp); - POSTING_READ(PRB0_CTL); + I915_WRITE_CTL(ring, tmp); goto repeat; } } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1eca8e710b9e..886c0e072490 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -256,10 +256,6 @@ * Instruction and interrupt control regs */ #define PGTBL_ER 0x02024 -#define PRB0_TAIL 0x02030 -#define PRB0_HEAD 0x02034 -#define PRB0_START 0x02038 -#define PRB0_CTL 0x0203c #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 @@ -285,10 +281,16 @@ #define RING_INVALID 0x00000000 #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ +#if 0 +#define PRB0_TAIL 0x02030 +#define PRB0_HEAD 0x02034 +#define PRB0_START 0x02038 +#define PRB0_CTL 0x0203c #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ +#endif #define IPEIR_I965 0x02064 #define IPEHR_I965 0x02068 #define INSTDONE_I965 0x0206c diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 63770c963077..6a7f11ff66f5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1983,17 +1983,17 @@ static void intel_flush_display_plane(struct drm_device *dev, static void intel_clear_scanline_wait(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; u32 tmp; if (IS_GEN2(dev)) /* Can't break the hang on i8xx */ return; - tmp = I915_READ(PRB0_CTL); - if (tmp & RING_WAIT) { - I915_WRITE(PRB0_CTL, tmp); - POSTING_READ(PRB0_CTL); - } + ring = &dev_priv->render_ring; + tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) + I915_WRITE_CTL(ring, tmp); } static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) -- cgit v1.2.3 From 3143a2bf18d12545f77dafa5b9f7fee83b001223 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 Nov 2010 15:55:10 +0000 Subject: drm/i915: Convert (void)I915_READ to POSTING_READ ... and so hide the flushes from tracing. 
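The substitution is not purely cosmetic: POSTING_READ keeps the dummy read that forces the preceding posted mmio write out to the hardware, but routes it through an untraced accessor so the flush no longer shows up as an event in the register trace. Plausibly something along these lines (a sketch, assuming dev_priv->regs is the mmio mapping; the real macros live in i915_drv.h):

/* A read that bypasses the tracepoint-instrumented I915_READ... */
#define I915_READ_NOTRACE(reg)	readl(dev_priv->regs + (reg))

/* ...so flushing a posted write leaves no trace event behind. */
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)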
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 48 ++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 21034527d3a4..ef3503733ebb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -70,7 +70,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->gt_irq_mask_reg & mask) != 0) { dev_priv->gt_irq_mask_reg &= ~mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - (void) I915_READ(GTIMR); + POSTING_READ(GTIMR); } } @@ -80,7 +80,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->gt_irq_mask_reg & mask) != mask) { dev_priv->gt_irq_mask_reg |= mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - (void) I915_READ(GTIMR); + POSTING_READ(GTIMR); } } @@ -91,7 +91,7 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != 0) { dev_priv->irq_mask_reg &= ~mask; I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - (void) I915_READ(DEIMR); + POSTING_READ(DEIMR); } } @@ -101,7 +101,7 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != mask) { dev_priv->irq_mask_reg |= mask; I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - (void) I915_READ(DEIMR); + POSTING_READ(DEIMR); } } @@ -111,7 +111,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != 0) { dev_priv->irq_mask_reg &= ~mask; I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); + POSTING_READ(IMR); } } @@ -121,7 +121,7 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != mask) { dev_priv->irq_mask_reg |= mask; I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); + POSTING_READ(IMR); } } @@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) dev_priv->pipestat[pipe] |= mask; /* Enable the interrupt, clear any pending status */ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); - (void) I915_READ(reg); + POSTING_READ(reg); } } @@ -156,7 +156,7 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) dev_priv->pipestat[pipe] &= ~mask; I915_WRITE(reg, dev_priv->pipestat[pipe]); - (void) I915_READ(reg); + POSTING_READ(reg); } } @@ -321,7 +321,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - (void)I915_READ(DEIER); + POSTING_READ(DEIER); de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); @@ -386,7 +386,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) done: I915_WRITE(DEIER, de_ier); - (void)I915_READ(DEIER); + POSTING_READ(DEIER); return ret; } @@ -796,7 +796,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); - (void)I915_READ(IPEIR_I965); + POSTING_READ(IPEIR_I965); } if (eir & GM45_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); @@ -804,7 +804,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); - (void)I915_READ(PGTBL_ER); + POSTING_READ(PGTBL_ER); } } @@ -815,7 +815,7 @@ static void 
i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); - (void)I915_READ(PGTBL_ER); + POSTING_READ(PGTBL_ER); } } @@ -846,7 +846,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); - (void)I915_READ(IPEIR); + POSTING_READ(IPEIR); } else { u32 ipeir = I915_READ(IPEIR_I965); @@ -863,12 +863,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); - (void)I915_READ(IPEIR_I965); + POSTING_READ(IPEIR_I965); } } I915_WRITE(EIR, eir); - (void)I915_READ(EIR); + POSTING_READ(EIR); eir = I915_READ(EIR); if (eir) { /* @@ -1435,17 +1435,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev) I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); - (void) I915_READ(DEIER); + POSTING_READ(DEIER); /* and GT */ I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); - (void) I915_READ(GTIER); + POSTING_READ(GTIER); /* south display irq */ I915_WRITE(SDEIMR, 0xffffffff); I915_WRITE(SDEIER, 0x0); - (void) I915_READ(SDEIER); + POSTING_READ(SDEIER); } static int ironlake_irq_postinstall(struct drm_device *dev) @@ -1464,7 +1464,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask_reg); I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); - (void) I915_READ(DEIER); + POSTING_READ(DEIER); if (IS_GEN6(dev)) { render_mask = @@ -1485,7 +1485,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) } I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); - (void) I915_READ(GTIER); + POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | @@ -1501,7 +1501,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); - (void) I915_READ(SDEIER); + POSTING_READ(SDEIER); if (IS_IRONLAKE_M(dev)) { /* Clear & enable PCU event interrupts */ @@ -1537,7 +1537,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) I915_WRITE(PIPEBSTAT, 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); - (void) I915_READ(IER); + POSTING_READ(IER); } /* @@ -1591,7 +1591,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) I915_WRITE(IMR, dev_priv->irq_mask_reg); I915_WRITE(IER, enable_mask); - (void) I915_READ(IER); + POSTING_READ(IER); if (I915_HAS_HOTPLUG(dev)) { u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); -- cgit v1.2.3 From c724e8a9407683a8a2ee8eb00b972badf237bbe1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 22 Nov 2010 08:07:02 +0000 Subject: drm/i915: Capture pinned buffers on error The pinned buffers are useful for diagnosing errors in setting up state for the chipset, which may not necessarily be 'active' at the time of the error, e.g. the cursor buffer object. 
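One subtlety in the capture code below: the active and pinned captures share a single GFP_ATOMIC allocation, with error->pinned_bo aliasing the tail of the active_bo array, so the error handler makes only one allocation that can fail. Schematically (restating the diff, not new code):

/* One atomic allocation holds both captures, laid out as
 * [ active_bo[0 .. active-1] | pinned_bo[0 .. pinned-1] ].
 */
error->active_bo = kmalloc(sizeof(*error->active_bo) * count, GFP_ATOMIC);
if (error->active_bo)
	error->pinned_bo = error->active_bo + error->active_bo_count;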
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 63 +++++++++++++++++---------- drivers/gpu/drm/i915/i915_drv.h | 4 +- drivers/gpu/drm/i915/i915_irq.c | 87 ++++++++++++++++++++++++------------- 3 files changed, 98 insertions(+), 56 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4c8fae9baaad..24a88ac63212 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -609,6 +609,36 @@ static const char *purgeable_flag(int purgeable) return purgeable ? " purgeable" : ""; } +static void print_error_buffers(struct seq_file *m, + const char *name, + struct drm_i915_error_buffer *err, + int count) +{ + seq_printf(m, "%s [%d]:\n", name, count); + + while (count--) { + seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s", + err->gtt_offset, + err->size, + err->read_domains, + err->write_domain, + err->seqno, + pin_flag(err->pinned), + tiling_flag(err->tiling), + dirty_flag(err->dirty), + purgeable_flag(err->purgeable), + ring_str(err->ring)); + + if (err->name) + seq_printf(m, " (name: %d)", err->name); + if (err->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", err->fence_reg); + + seq_printf(m, "\n"); + err++; + } +} + static int i915_error_state(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -658,30 +688,15 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); seq_printf(m, " seqno: 0x%08x\n", error->seqno); - if (error->active_bo_count) { - seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); - - for (i = 0; i < error->active_bo_count; i++) { - seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s %s", - error->active_bo[i].gtt_offset, - error->active_bo[i].size, - error->active_bo[i].read_domains, - error->active_bo[i].write_domain, - error->active_bo[i].seqno, - pin_flag(error->active_bo[i].pinned), - tiling_flag(error->active_bo[i].tiling), - dirty_flag(error->active_bo[i].dirty), - purgeable_flag(error->active_bo[i].purgeable), - ring_str(error->active_bo[i].ring)); - - if (error->active_bo[i].name) - seq_printf(m, " (name: %d)", error->active_bo[i].name); - if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg); - - seq_printf(m, "\n"); - } - } + if (error->active_bo) + print_error_buffers(m, "Active", + error->active_bo, + error->active_bo_count); + + if (error->pinned_bo) + print_error_buffers(m, "Pinned", + error->pinned_bo, + error->pinned_bo_count); for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { if (error->batchbuffer[i]) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 73a41f7ab8c6..826c7237409b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -184,8 +184,8 @@ struct drm_i915_error_state { u32 dirty:1; u32 purgeable:1; u32 ring:4; - } *active_bo; - u32 active_bo_count; + } *active_bo, *pinned_bo; + u32 active_bo_count, pinned_bo_count; struct intel_overlay_error_state *overlay; }; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ef3503733ebb..bbcd5da89ba5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -552,6 +552,40 @@ i915_ringbuffer_last_batch(struct drm_device *dev, return bbaddr; } +static u32 capture_bo_list(struct drm_i915_error_buffer *err, + int count, + struct 
list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, mm_list) { + err->size = obj->base.size; + err->name = obj->base.name; + err->seqno = obj->last_rendering_seqno; + err->gtt_offset = obj->gtt_offset; + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + err->ring = obj->ring->id; + + if (++i == count) + break; + + err++; + } + + return i; +} + /** * i915_capture_error_state - capture an error record for later analysis * @dev: drm device @@ -700,42 +734,35 @@ static void i915_capture_error_state(struct drm_device *dev) error->ringbuffer = i915_error_object_create(dev, dev_priv->render_ring.gem_object); - /* Record buffers on the active list. */ + /* Record buffers on the active and pinned lists. */ error->active_bo = NULL; - error->active_bo_count = 0; + error->pinned_bo = NULL; - if (count) + error->active_bo_count = count; + list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list) + count++; + error->pinned_bo_count = count - error->active_bo_count; + + if (count) { error->active_bo = kmalloc(sizeof(*error->active_bo)*count, GFP_ATOMIC); - - if (error->active_bo) { - int i = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - - error->active_bo[i].size = obj->size; - error->active_bo[i].name = obj->name; - error->active_bo[i].seqno = obj_priv->last_rendering_seqno; - error->active_bo[i].gtt_offset = obj_priv->gtt_offset; - error->active_bo[i].read_domains = obj->read_domains; - error->active_bo[i].write_domain = obj->write_domain; - error->active_bo[i].fence_reg = obj_priv->fence_reg; - error->active_bo[i].pinned = 0; - if (obj_priv->pin_count > 0) - error->active_bo[i].pinned = 1; - if (obj_priv->user_pin_count > 0) - error->active_bo[i].pinned = -1; - error->active_bo[i].tiling = obj_priv->tiling_mode; - error->active_bo[i].dirty = obj_priv->dirty; - error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; - error->active_bo[i].ring = obj_priv->ring->id; - - if (++i == count) - break; - } - error->active_bo_count = i; + if (error->active_bo) + error->pinned_bo = + error->active_bo + error->active_bo_count; } + if (error->active_bo) + error->active_bo_count = + capture_bo_list(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); + + if (error->pinned_bo) + error->pinned_bo_count = + capture_bo_list(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.pinned_list); + do_gettimeofday(&error->time); error->overlay = intel_overlay_capture_error_state(dev); -- cgit v1.2.3 From c4a1d9e4dc5d5313cfec2cc0c9d630efe8a6f287 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 21 Nov 2010 13:12:35 +0000 Subject: drm/i915: Capture interesting display registers on error When trying to diagnose mysterious errors on resume, capture the display register contents as well. 
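The pipe-indexed register names used below (PIPESRC(pipe), PIPECONF(pipe), CURCNTR(pipe), ...) are built on the driver's _PIPE helper, which interpolates between the pipe A and pipe B addresses; roughly (the wrapper lines are from the diff, the _PIPE body is an assumption):

/* pipe 0 selects register 'a', pipe 1 selects register 'b'. */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))

#define PIPESRC(pipe)	_PIPE(pipe, PIPEASRC, PIPEBSRC)
#define CURCNTR(pipe)	_PIPE(pipe, CURACNTR, CURBCNTR)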
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 + drivers/gpu/drm/i915/i915_drv.h | 8 +++ drivers/gpu/drm/i915/i915_irq.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 6 +- drivers/gpu/drm/i915/intel_display.c | 110 +++++++++++++++++++++++++++++++++++ 5 files changed, 127 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 24a88ac63212..421b8414b577 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -729,6 +729,9 @@ static int i915_error_state(struct seq_file *m, void *unused) if (error->overlay) intel_overlay_print_error_state(m, error->overlay); + if (error->display) + intel_display_print_error_state(m, dev, error->display); + out: spin_unlock_irqrestore(&dev_priv->error_lock, flags); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 826c7237409b..4c20ad92c0f3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -140,6 +140,8 @@ struct sdvo_device_mapping { u8 ddc_pin; }; +struct intel_display_error_state; + struct drm_i915_error_state { u32 eir; u32 pgtbl_er; @@ -187,6 +189,7 @@ struct drm_i915_error_state { } *active_bo, *pinned_bo; u32 active_bo_count, pinned_bo_count; struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; }; struct drm_i915_display_funcs { @@ -1223,6 +1226,11 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); + +extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern void intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state *error); #endif /** diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index bbcd5da89ba5..0b6052abedd1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -766,6 +766,7 @@ static void i915_capture_error_state(struct drm_device *dev) do_gettimeofday(&error->time); error->overlay = intel_overlay_capture_error_state(dev); + error->display = intel_display_capture_error_state(dev); spin_lock_irqsave(&dev_priv->error_lock, flags); if (dev_priv->first_error == NULL) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 886c0e072490..ec2a8b07ba5b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1193,7 +1193,6 @@ #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) -#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) /* VGA port control */ @@ -2207,6 +2206,7 @@ #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) +#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) @@ -2376,6 +2376,10 @@ #define CURBBASE 0x700c4 #define CURBPOS 0x700c8 +#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR) +#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE) +#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS) + /* Display A control */ #define 
DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3fa5aaa941d2..d4bc443f43fc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6269,3 +6269,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); return 0; } + +#ifdef CONFIG_DEBUG_FS +#include + +struct intel_display_error_state { + struct intel_cursor_error_state { + u32 control; + u32 position; + u32 base; + u32 size; + } cursor[2]; + + struct intel_pipe_error_state { + u32 conf; + u32 source; + + u32 htotal; + u32 hblank; + u32 hsync; + u32 vtotal; + u32 vblank; + u32 vsync; + } pipe[2]; + + struct intel_plane_error_state { + u32 control; + u32 stride; + u32 size; + u32 pos; + u32 addr; + u32 surface; + u32 tile_offset; + } plane[2]; +}; + +struct intel_display_error_state * +intel_display_capture_error_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_display_error_state *error; + int i; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (error == NULL) + return NULL; + + for (i = 0; i < 2; i++) { + error->cursor[i].control = I915_READ(CURCNTR(i)); + error->cursor[i].position = I915_READ(CURPOS(i)); + error->cursor[i].base = I915_READ(CURBASE(i)); + + error->plane[i].control = I915_READ(DSPCNTR(i)); + error->plane[i].stride = I915_READ(DSPSTRIDE(i)); + error->plane[i].size = I915_READ(DSPSIZE(i)); + error->plane[i].pos= I915_READ(DSPPOS(i)); + error->plane[i].addr = I915_READ(DSPADDR(i)); + if (INTEL_INFO(dev)->gen >= 4) { + error->plane[i].surface = I915_READ(DSPSURF(i)); + error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); + } + + error->pipe[i].conf = I915_READ(PIPECONF(i)); + error->pipe[i].source = I915_READ(PIPESRC(i)); + error->pipe[i].htotal = I915_READ(HTOTAL(i)); + error->pipe[i].hblank = I915_READ(HBLANK(i)); + error->pipe[i].hsync = I915_READ(HSYNC(i)); + error->pipe[i].vtotal = I915_READ(VTOTAL(i)); + error->pipe[i].vblank = I915_READ(VBLANK(i)); + error->pipe[i].vsync = I915_READ(VSYNC(i)); + } + + return error; +} + +void +intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state *error) +{ + int i; + + for (i = 0; i < 2; i++) { + seq_printf(m, "Pipe [%d]:\n", i); + seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); + seq_printf(m, " SRC: %08x\n", error->pipe[i].source); + seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); + seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); + seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); + seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); + seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); + seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); + + seq_printf(m, "Plane [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->plane[i].control); + seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); + seq_printf(m, " SIZE: %08x\n", error->plane[i].size); + seq_printf(m, " POS: %08x\n", error->plane[i].pos); + seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); + if (INTEL_INFO(dev)->gen >= 4) { + seq_printf(m, " SURF: %08x\n", error->plane[i].surface); + seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); + } + + seq_printf(m, "Cursor [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); + seq_printf(m, " POS: %08x\n", error->cursor[i].position); + seq_printf(m, " BASE: 
%08x\n", error->cursor[i].base); + } +} +#endif -- cgit v1.2.3 From 3685092b717882bb9b6801bf3e4b02a106e3b129 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 08:49:38 +0000 Subject: drm/i915: Avoid oops when capturing NULL ring for inactive pinned buffers Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 6 +++--- drivers/gpu/drm/i915/i915_irq.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 421b8414b577..4fe49e0228ef 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -572,9 +572,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) static const char *ring_str(int ring) { switch (ring) { - case RING_RENDER: return "render"; - case RING_BSD: return "bsd"; - case RING_BLT: return "blt"; + case RING_RENDER: return " render"; + case RING_BSD: return " bsd"; + case RING_BLT: return " blt"; default: return ""; } } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0b6052abedd1..a8f55f061f6d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -575,7 +575,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, err->tiling = obj->tiling_mode; err->dirty = obj->dirty; err->purgeable = obj->madv != I915_MADV_WILLNEED; - err->ring = obj->ring->id; + err->ring = obj->ring ? obj->ring->id : 0; if (++i == count) break; -- cgit v1.2.3 From 05394f3975dceb107a5e1393e2244946e5b43660 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Nov 2010 19:18:58 +0000 Subject: drm/i915: Use drm_i915_gem_object as the preferred type A glorified s/obj_priv/obj/ with a net reduction of over a 100 lines and many characters! 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 64 +- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 84 +- drivers/gpu/drm/i915/i915_gem.c | 1268 +++++++++++++++---------------- drivers/gpu/drm/i915/i915_gem_debug.c | 23 +- drivers/gpu/drm/i915/i915_gem_evict.c | 67 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 68 +- drivers/gpu/drm/i915/i915_gem_tiling.c | 104 ++- drivers/gpu/drm/i915/i915_irq.c | 67 +- drivers/gpu/drm/i915/i915_trace.h | 41 +- drivers/gpu/drm/i915/intel_display.c | 242 +++--- drivers/gpu/drm/i915/intel_drv.h | 15 +- drivers/gpu/drm/i915/intel_fb.c | 25 +- drivers/gpu/drm/i915/intel_overlay.c | 48 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 54 +- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +- 16 files changed, 1019 insertions(+), 1157 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4fe49e0228ef..1e8cd74d18d5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -87,19 +87,19 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) +static const char *get_pin_flag(struct drm_i915_gem_object *obj) { - if (obj_priv->user_pin_count > 0) + if (obj->user_pin_count > 0) return "P"; - else if (obj_priv->pin_count > 0) + else if (obj->pin_count > 0) return "p"; else return " "; } -static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) +static const char *get_tiling_flag(struct drm_i915_gem_object *obj) { - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { default: case I915_TILING_NONE: return " "; case I915_TILING_X: return "X"; @@ -140,7 +140,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct list_head *head; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; size_t total_obj_size, total_gtt_size; int count, ret; @@ -175,12 +175,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj_priv, head, mm_list) { + list_for_each_entry(obj, head, mm_list) { seq_printf(m, " "); - describe_obj(m, obj_priv); + describe_obj(m, obj); seq_printf(m, "\n"); - total_obj_size += obj_priv->base.size; - total_gtt_size += obj_priv->gtt_space->size; + total_obj_size += obj->base.size; + total_gtt_size += obj->gtt_space->size; count++; } mutex_unlock(&dev->struct_mutex); @@ -251,14 +251,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) seq_printf(m, "%d prepares\n", work->pending); if (work->old_fb_obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); - if(obj_priv) - seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + struct drm_i915_gem_object *obj = work->old_fb_obj; + if (obj) + seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } if (work->pending_flip_obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); - if(obj_priv) - seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + struct drm_i915_gem_object *obj = work->pending_flip_obj; + if (obj) + seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -421,17 +421,17 @@ static int 
i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; + struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; seq_printf(m, "Fenced object[%2d] = ", i); if (obj == NULL) seq_printf(m, "unused"); else - describe_obj(m, to_intel_bo(obj)); + describe_obj(m, obj); seq_printf(m, "\n"); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -465,14 +465,14 @@ static int i915_hws_info(struct seq_file *m, void *data) static void i915_dump_object(struct seq_file *m, struct io_mapping *mapping, - struct drm_i915_gem_object *obj_priv) + struct drm_i915_gem_object *obj) { int page, page_count, i; - page_count = obj_priv->base.size / PAGE_SIZE; + page_count = obj->base.size / PAGE_SIZE; for (page = 0; page < page_count; page++) { u32 *mem = io_mapping_map_wc(mapping, - obj_priv->gtt_offset + page * PAGE_SIZE); + obj->gtt_offset + page * PAGE_SIZE); for (i = 0; i < PAGE_SIZE; i += 4) seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); io_mapping_unmap(mem); @@ -484,25 +484,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - obj = &obj_priv->base; - if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { - seq_printf(m, "--- gtt_offset = 0x%08x\n", - obj_priv->gtt_offset); - i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv); + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); + i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); } } mutex_unlock(&dev->struct_mutex); - return 0; } @@ -525,7 +521,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) if (ret) return ret; - if (!ring->gem_object) { + if (!ring->obj) { seq_printf(m, "No ringbuffer setup\n"); } else { u8 *virt = ring->virtual_start; @@ -983,7 +979,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.height, fb->base.depth, fb->base.bits_per_pixel); - describe_obj(m, to_intel_bo(fb->obj)); + describe_obj(m, fb->obj); seq_printf(m, "\n"); list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { @@ -995,7 +991,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.height, fb->base.depth, fb->base.bits_per_pixel); - describe_obj(m, to_intel_bo(fb->obj)); + describe_obj(m, fb->obj); seq_printf(m, "\n"); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7084de7c4c55..7960fd63ecb1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -157,7 +157,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } if (init->ring_size != 0) { - if (dev_priv->render_ring.gem_object != NULL) { + if (dev_priv->render_ring.obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); diff --git 
a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dc371d987aa7..22d6388b331f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -32,7 +32,6 @@ #include "i915_reg.h" #include "intel_bios.h" -#include "i915_trace.h" #include "intel_ringbuffer.h" #include #include @@ -90,7 +89,7 @@ struct drm_i915_gem_phys_object { int id; struct page **page_list; drm_dma_handle_t *handle; - struct drm_gem_object *cur_obj; + struct drm_i915_gem_object *cur_obj; }; struct mem_block { @@ -125,7 +124,7 @@ struct drm_i915_master_private { #define I915_FENCE_REG_NONE -1 struct drm_i915_fence_reg { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct list_head lru_list; bool gpu; }; @@ -280,9 +279,9 @@ typedef struct drm_i915_private { uint32_t counter; unsigned int seqno_gfx_addr; drm_local_map_t hws_map; - struct drm_gem_object *seqno_obj; - struct drm_gem_object *pwrctx; - struct drm_gem_object *renderctx; + struct drm_i915_gem_object *seqno_obj; + struct drm_i915_gem_object *pwrctx; + struct drm_i915_gem_object *renderctx; struct resource mch_res; @@ -690,14 +689,14 @@ typedef struct drm_i915_private { u8 fmax; u8 fstart; - u64 last_count1; - unsigned long last_time1; - u64 last_count2; - struct timespec last_time2; - unsigned long gfx_power; - int c_m; - int r_t; - u8 corr; + u64 last_count1; + unsigned long last_time1; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + int c_m; + int r_t; + u8 corr; spinlock_t *mchdev_lock; enum no_fbc_reason no_fbc_reason; @@ -711,7 +710,6 @@ typedef struct drm_i915_private { struct intel_fbdev *fbdev; } drm_i915_private_t; -/** driver private structure attached to each drm_gem_object */ struct drm_i915_gem_object { struct drm_gem_object base; @@ -918,7 +916,7 @@ enum intel_chip_family { #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) -#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) +#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte @@ -947,6 +945,8 @@ enum intel_chip_family { #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) +#include "i915_trace.h" + extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc; @@ -1085,14 +1085,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); -struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, - size_t size); +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size); void i915_gem_free_object(struct drm_gem_object *obj); -int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, +int i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, bool map_and_fenceable); -void i915_gem_object_unpin(struct drm_gem_object *obj); -int i915_gem_object_unbind(struct drm_gem_object *obj); -void i915_gem_release_mmap(struct drm_gem_object *obj); +void i915_gem_object_unpin(struct drm_i915_gem_object *obj); +int i915_gem_object_unbind(struct drm_i915_gem_object *obj); +void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); 
/** @@ -1104,14 +1105,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } -int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, +int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, bool interruptible); -int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, +int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset(struct drm_device *dev); -void i915_gem_clflush_object(struct drm_gem_object *obj); -int i915_gem_object_set_domain(struct drm_gem_object *obj, +void i915_gem_clflush_object(struct drm_i915_gem_object *obj); +int i915_gem_object_set_domain(struct drm_i915_gem_object *obj, uint32_t read_domains, uint32_t write_domain); int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, @@ -1131,23 +1132,23 @@ int i915_do_wait_request(struct drm_device *dev, bool interruptible, struct intel_ring_buffer *ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); -int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, +int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write); -int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, +int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, bool pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, int id, int align); void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj); + struct drm_i915_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); -void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); +void i915_gem_release(struct drm_device *dev, struct drm_file *file); /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); -int i915_gem_gtt_bind_object(struct drm_gem_object *obj); -void i915_gem_gtt_unbind_object(struct drm_gem_object *obj); +int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); /* i915_gem_evict.c */ int i915_gem_evict_something(struct drm_device *dev, int min_size, @@ -1157,19 +1158,20 @@ int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); -void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); -void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); +void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_debug.c */ -void i915_gem_dump_object(struct drm_gem_object *obj, int len, +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark); #if WATCH_LISTS int i915_verify_lists(struct drm_device *dev); #else #define i915_verify_lists(dev) 0 #endif -void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); -void i915_gem_dump_object(struct drm_gem_object *obj, int len, +void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, + int handle); +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark); /* i915_debugfs.c */ @@ -1251,10 +1253,10 @@ extern void intel_display_print_error_state(struct seq_file *m, * In that case, we don't need to do it when GEM is 
initialized as nobody else * has access to the ring. */ -#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ - if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ +#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ + if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \ == NULL) \ - LOCK_TEST_WITH_RETURN(dev, file_priv); \ + LOCK_TEST_WITH_RETURN(dev, file); \ } while (0) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3cac366b3053..d196895527a6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -41,29 +41,30 @@ struct change_domains { uint32_t flush_rings; }; -static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); -static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); +static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj); +static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj); -static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, +static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, bool pipelined); -static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); -static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); -static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, +static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); +static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); +static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write); -static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, +static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size); -static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, +static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); +static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible); -static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, +static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable); -static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); -static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, +static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj); +static int i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv); -static void i915_gem_free_object_tail(struct drm_gem_object *obj); + struct drm_file *file); +static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); static int i915_gem_inactive_shrink(struct shrinker *shrinker, int nr_to_scan, @@ -212,11 +213,9 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) } static inline bool -i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) +i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj_priv->gtt_space && - !obj_priv->active && - obj_priv->pin_count == 0; + return obj->gtt_space && !obj->active && obj->pin_count == 0; } int i915_gem_do_init(struct drm_device *dev, @@ -244,7 +243,7 @@ int i915_gem_do_init(struct drm_device *dev, int i915_gem_init_ioctl(struct 
drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_init *args = data; int ret; @@ -258,7 +257,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; @@ -280,10 +279,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_create *args = data; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret; u32 handle; @@ -294,29 +293,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOMEM; - ret = drm_gem_handle_create(file_priv, obj, &handle); + ret = drm_gem_handle_create(file, &obj->base, &handle); if (ret) { - drm_gem_object_release(obj); - i915_gem_info_remove_obj(dev->dev_private, obj->size); + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev->dev_private, obj->base.size); kfree(obj); return ret; } /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); trace_i915_gem_object_create(obj); args->handle = handle; return 0; } -static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) +static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { - drm_i915_private_t *dev_priv = obj->dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && - obj_priv->tiling_mode != I915_TILING_NONE; + obj->tiling_mode != I915_TILING_NONE; } static inline void @@ -392,12 +390,12 @@ slow_shmem_bit17_copy(struct page *gpu_page, * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). */ static int -i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pread_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pread *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; loff_t offset; char __user *user_data; @@ -406,7 +404,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -455,12 +452,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, * and not take page faults. 
*/ static int -i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pread_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pread *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; @@ -506,7 +503,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -575,11 +571,10 @@ out: */ int i915_gem_pread_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pread *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret = 0; if (args->size == 0) @@ -599,15 +594,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Bounds check source. */ - if (args->offset > obj->size || args->size > obj->size - args->offset) { + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { ret = -EINVAL; goto out; } @@ -620,12 +615,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ret = -EFAULT; if (!i915_gem_object_needs_bit17_swizzle(obj)) - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + ret = i915_gem_shmem_pread_fast(dev, obj, args, file); if (ret == -EFAULT) - ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + ret = i915_gem_shmem_pread_slow(dev, obj, args, file); out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -680,11 +675,11 @@ slow_kernel_write(struct io_mapping *mapping, * user into the GTT, uncached. */ static int -i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_gtt_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t offset, page_base; @@ -694,8 +689,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); - offset = obj_priv->gtt_offset + args->offset; + offset = obj->gtt_offset + args->offset; while (remain > 0) { /* Operation in this page @@ -735,11 +729,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). 
*/ static int -i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_gtt_pwrite_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t gtt_page_base, offset; @@ -780,8 +774,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if (ret) goto out_unpin_pages; - obj_priv = to_intel_bo(obj); - offset = obj_priv->gtt_offset + args->offset; + offset = obj->gtt_offset + args->offset; while (remain > 0) { /* Operation in this page @@ -827,12 +820,12 @@ out_unpin_pages: * copy_from_user into the kmapped pages backing the object. */ static int -i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; loff_t offset; char __user *user_data; @@ -841,9 +834,8 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); offset = args->offset; - obj_priv->dirty = 1; + obj->dirty = 1; while (remain > 0) { struct page *page; @@ -898,12 +890,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, * struct_mutex is held. */ static int -i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pwrite_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; @@ -947,9 +939,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - obj_priv = to_intel_bo(obj); offset = args->offset; - obj_priv->dirty = 1; + obj->dirty = 1; while (remain > 0) { struct page *page; @@ -1020,8 +1011,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_pwrite *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; if (args->size == 0) @@ -1041,15 +1031,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Bounds check destination. 
*/ - if (args->offset > obj->size || args->size > obj->size - args->offset) { + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { ret = -EINVAL; goto out; } @@ -1060,11 +1050,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj_priv->phys_obj) + if (obj->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file); - else if (obj_priv->tiling_mode == I915_TILING_NONE && - obj_priv->gtt_space && - obj->write_domain != I915_GEM_DOMAIN_CPU) { + else if (obj->tiling_mode == I915_TILING_NONE && + obj->gtt_space && + obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_object_pin(obj, 0, true); if (ret) goto out; @@ -1092,7 +1082,7 @@ out_unpin: } out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1104,12 +1094,11 @@ unlock: */ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_set_domain *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; uint32_t read_domains = args->read_domains; uint32_t write_domain = args->write_domain; int ret; @@ -1134,12 +1123,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); intel_mark_busy(dev, obj); @@ -1149,9 +1137,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, /* Update the LRU on the fence for the CPU access that's * about to occur. 
*/ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { + if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; + &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); } @@ -1167,10 +1155,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, } /* Maintain LRU order of "inactive" objects */ - if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + if (ret == 0 && i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1181,10 +1169,10 @@ unlock: */ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_sw_finish *args = data; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret = 0; if (!(dev->driver->driver_features & DRIVER_GEM)) @@ -1194,17 +1182,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } /* Pinned buffers may be scanout, so flush the cache */ - if (to_intel_bo(obj)->pin_count) + if (obj->pin_count) i915_gem_object_flush_cpu_write_domain(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1219,7 +1207,7 @@ unlock: */ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap *args = data; @@ -1230,7 +1218,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = drm_gem_object_lookup(dev, file, args->handle); if (obj == NULL) return -ENOENT; @@ -1273,10 +1261,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, */ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { - struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); pgoff_t page_offset; unsigned long pfn; int ret = 0; @@ -1288,17 +1275,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); - BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); + BUG_ON(obj->pin_count && !obj->pin_mappable); - if (obj_priv->gtt_space) { - if (!obj_priv->map_and_fenceable) { + if (obj->gtt_space) { + if (!obj->map_and_fenceable) { ret = i915_gem_object_unbind(obj); if (ret) goto unlock; } } - if (!obj_priv->gtt_space) { + if (!obj->gtt_space) { ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto unlock; @@ -1308,22 +1295,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto unlock; - if (!obj_priv->fault_mappable) { - obj_priv->fault_mappable = true; - i915_gem_info_update_mappable(dev_priv, 
obj_priv, true); + if (!obj->fault_mappable) { + obj->fault_mappable = true; + i915_gem_info_update_mappable(dev_priv, obj, true); } /* Need a new fence register? */ - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj, true); if (ret) goto unlock; } - if (i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + if (i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + + pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + page_offset; /* Finally, remap it using the new GTT offset */ @@ -1356,36 +1343,39 @@ unlock: * This routine allocates and attaches a fake offset for @obj. */ static int -i915_gem_create_mmap_offset(struct drm_gem_object *obj) +i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_gem_mm *mm = dev->mm_private; struct drm_map_list *list; struct drm_local_map *map; int ret = 0; /* Set the object up for mmap'ing */ - list = &obj->map_list; + list = &obj->base.map_list; list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); if (!list->map) return -ENOMEM; map = list->map; map->type = _DRM_GEM; - map->size = obj->size; + map->size = obj->base.size; map->handle = obj; /* Get a DRM GEM mmap offset allocated... */ list->file_offset_node = drm_mm_search_free(&mm->offset_manager, - obj->size / PAGE_SIZE, 0, 0); + obj->base.size / PAGE_SIZE, + 0, 0); if (!list->file_offset_node) { - DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); + DRM_ERROR("failed to allocate offset for bo %d\n", + obj->base.name); ret = -ENOSPC; goto out_free_list; } list->file_offset_node = drm_mm_get_block(list->file_offset_node, - obj->size / PAGE_SIZE, 0); + obj->base.size / PAGE_SIZE, + 0); if (!list->file_offset_node) { ret = -ENOMEM; goto out_free_list; } @@ -1424,29 +1414,28 @@ out_free_list: * fixup by i915_gem_fault(). */ void -i915_gem_release_mmap(struct drm_gem_object *obj) +i915_gem_release_mmap(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - if (unlikely(obj->map_list.map && dev->dev_mapping)) + if (unlikely(obj->base.map_list.map && dev->dev_mapping)) unmap_mapping_range(dev->dev_mapping, - (loff_t)obj->map_list.hash.key<<PAGE_SHIFT, obj->size, 1); + (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, obj->base.size, 1); - if (obj_priv->fault_mappable) { - obj_priv->fault_mappable = false; - i915_gem_info_update_mappable(dev_priv, obj_priv, false); + if (obj->fault_mappable) { + obj->fault_mappable = false; + i915_gem_info_update_mappable(dev_priv, obj, false); } } static void -i915_gem_free_mmap_offset(struct drm_gem_object *obj) +i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list = &obj->map_list; + struct drm_map_list *list = &obj->base.map_list; drm_ht_remove_item(&mm->offset_hash, &list->hash); drm_mm_put_block(list->file_offset_node); @@ -1462,23 +1451,23 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) * potential fence register mapping.
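 *
 * For example (an illustrative walk-through, assuming a gen3 chipset
 * with its 1MiB minimum fence size): a tiled object of 1300KiB is
 * rounded up by i915_gem_get_gtt_size() below via
 *
 *	size = 1024*1024;
 *	while (size < obj->base.size)
 *		size <<= 1;
 *
 * to 2MiB, so both its fenced size and its required GTT alignment
 * become 2MiB; fence regions on these chips are power-of-two sized and
 * naturally aligned.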
*/ static uint32_t -i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) +i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj_priv->base.dev; + struct drm_device *dev = obj->base.dev; /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ if (INTEL_INFO(dev)->gen >= 4 || - obj_priv->tiling_mode == I915_TILING_NONE) + obj->tiling_mode == I915_TILING_NONE) return 4096; /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ - return i915_gem_get_gtt_size(obj_priv); + return i915_gem_get_gtt_size(obj); } /** @@ -1490,16 +1479,16 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) * unfenced tiled surface requirements. */ static uint32_t -i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj_priv->base.dev; + struct drm_device *dev = obj->base.dev; int tile_height; /* * Minimum alignment is 4k (GTT page size) for sane hw. */ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || - obj_priv->tiling_mode == I915_TILING_NONE) + obj->tiling_mode == I915_TILING_NONE) return 4096; /* @@ -1508,18 +1497,18 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) * placed in a fenced gtt region). */ if (IS_GEN2(dev) || - (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) + (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) tile_height = 32; else tile_height = 8; - return tile_height * obj_priv->stride * 2; + return tile_height * obj->stride * 2; } static uint32_t -i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) +i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj_priv->base.dev; + struct drm_device *dev = obj->base.dev; uint32_t size; /* @@ -1527,7 +1516,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) * if a fence register is needed for the object. */ if (INTEL_INFO(dev)->gen >= 4) - return obj_priv->base.size; + return obj->base.size; /* * Previous chips need to be aligned to the size of the smallest @@ -1538,7 +1527,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) else size = 512*1024; - while (size < obj_priv->base.size) + while (size < obj->base.size) size <<= 1; return size; @@ -1548,7 +1537,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device * @data: GTT mapping ioctl data - * @file_priv: GEM object info + * @file: GEM object info * * Simply returns the fake offset to userspace so it can mmap it. 
* The mmap call will end up in drm_gem_mmap(), which will set things @@ -1561,12 +1550,11 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) */ int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap_gtt *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) @@ -1576,44 +1564,42 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj->size > dev_priv->mm.gtt_mappable_end) { + if (obj->base.size > dev_priv->mm.gtt_mappable_end) { ret = -E2BIG; goto unlock; } - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); ret = -EINVAL; goto out; } - if (!obj->map_list.map) { + if (!obj->base.map_list.map) { ret = i915_gem_create_mmap_offset(obj); if (ret) goto out; } - args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; + args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; } static int -i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, gfp_t gfpmask) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count, i; struct address_space *mapping; struct inode *inode; @@ -1622,13 +1608,13 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, /* Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. 
*/ - page_count = obj->size / PAGE_SIZE; - BUG_ON(obj_priv->pages != NULL); - obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *)); - if (obj_priv->pages == NULL) + page_count = obj->base.size / PAGE_SIZE; + BUG_ON(obj->pages != NULL); + obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); + if (obj->pages == NULL) return -ENOMEM; - inode = obj->filp->f_path.dentry->d_inode; + inode = obj->base.filp->f_path.dentry->d_inode; mapping = inode->i_mapping; for (i = 0; i < page_count; i++) { page = read_cache_page_gfp(mapping, i, @@ -1639,51 +1625,50 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, if (IS_ERR(page)) goto err_pages; - obj_priv->pages[i] = page; + obj->pages[i] = page; } - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) i915_gem_object_do_bit_17_swizzle(obj); return 0; err_pages: while (i--) - page_cache_release(obj_priv->pages[i]); + page_cache_release(obj->pages[i]); - drm_free_large(obj_priv->pages); - obj_priv->pages = NULL; + drm_free_large(obj->pages); + obj->pages = NULL; return PTR_ERR(page); } static void -i915_gem_object_put_pages_gtt(struct drm_gem_object *obj) +i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size / PAGE_SIZE; + int page_count = obj->base.size / PAGE_SIZE; int i; - BUG_ON(obj_priv->madv == __I915_MADV_PURGED); + BUG_ON(obj->madv == __I915_MADV_PURGED); - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) i915_gem_object_save_bit_17_swizzle(obj); - if (obj_priv->madv == I915_MADV_DONTNEED) - obj_priv->dirty = 0; + if (obj->madv == I915_MADV_DONTNEED) + obj->dirty = 0; for (i = 0; i < page_count; i++) { - if (obj_priv->dirty) - set_page_dirty(obj_priv->pages[i]); + if (obj->dirty) + set_page_dirty(obj->pages[i]); - if (obj_priv->madv == I915_MADV_WILLNEED) - mark_page_accessed(obj_priv->pages[i]); + if (obj->madv == I915_MADV_WILLNEED) + mark_page_accessed(obj->pages[i]); - page_cache_release(obj_priv->pages[i]); + page_cache_release(obj->pages[i]); } - obj_priv->dirty = 0; + obj->dirty = 0; - drm_free_large(obj_priv->pages); - obj_priv->pages = NULL; + drm_free_large(obj->pages); + obj->pages = NULL; } static uint32_t @@ -1695,47 +1680,44 @@ i915_gem_next_request_seqno(struct drm_device *dev, } static void -i915_gem_object_move_to_active(struct drm_gem_object *obj, +i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t seqno = i915_gem_next_request_seqno(dev, ring); BUG_ON(ring == NULL); - obj_priv->ring = ring; + obj->ring = ring; /* Add a reference if we're newly entering the active list. */ - if (!obj_priv->active) { - drm_gem_object_reference(obj); - obj_priv->active = 1; + if (!obj->active) { + drm_gem_object_reference(&obj->base); + obj->active = 1; } /* Move from whatever list we were on to the tail of execution. 
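 *
 * (Illustrative note: the drm_gem_object_reference() taken above when an
 * object first becomes active is what keeps the bo alive even if
 * userspace closes its handle while the GPU is still using it; the
 * matching unreference happens in i915_gem_object_move_to_inactive()
 * once the request retires.)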
*/ - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); - list_move_tail(&obj_priv->ring_list, &ring->active_list); - obj_priv->last_rendering_seqno = seqno; + list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj->ring_list, &ring->active_list); + obj->last_rendering_seqno = seqno; } static void -i915_gem_object_move_to_flushing(struct drm_gem_object *obj) +i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - BUG_ON(!obj_priv->active); - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); - list_del_init(&obj_priv->ring_list); - obj_priv->last_rendering_seqno = 0; + BUG_ON(!obj->active); + list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); + list_del_init(&obj->ring_list); + obj->last_rendering_seqno = 0; } /* Immediately discard the backing storage */ static void -i915_gem_object_truncate(struct drm_gem_object *obj) +i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct inode *inode; /* Our goal here is to return as much of the memory as @@ -1744,40 +1726,39 @@ i915_gem_object_truncate(struct drm_gem_object *obj) * backing pages, *now*. Here we mirror the actions taken * by shmem_delete_inode() to release the backing store. */ - inode = obj->filp->f_path.dentry->d_inode; + inode = obj->base.filp->f_path.dentry->d_inode; truncate_inode_pages(inode->i_mapping, 0); if (inode->i_op->truncate_range) inode->i_op->truncate_range(inode, 0, (loff_t)-1); - obj_priv->madv = __I915_MADV_PURGED; + obj->madv = __I915_MADV_PURGED; } static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) { - return obj_priv->madv == I915_MADV_DONTNEED; + return obj->madv == I915_MADV_DONTNEED; } static void -i915_gem_object_move_to_inactive(struct drm_gem_object *obj) +i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - if (obj_priv->pin_count != 0) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); + if (obj->pin_count != 0) + list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); else - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - list_del_init(&obj_priv->ring_list); + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_del_init(&obj->ring_list); - BUG_ON(!list_empty(&obj_priv->gpu_write_list)); + BUG_ON(!list_empty(&obj->gpu_write_list)); - obj_priv->last_rendering_seqno = 0; - obj_priv->ring = NULL; - if (obj_priv->active) { - obj_priv->active = 0; - drm_gem_object_unreference(obj); + obj->last_rendering_seqno = 0; + obj->ring = NULL; + if (obj->active) { + obj->active = 0; + drm_gem_object_unreference(&obj->base); } WARN_ON(i915_verify_lists(dev)); } @@ -1788,30 +1769,28 @@ i915_gem_process_flushing_list(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv, *next; + struct drm_i915_gem_object *obj, *next; - list_for_each_entry_safe(obj_priv, next, + list_for_each_entry_safe(obj, next, &ring->gpu_write_list, gpu_write_list) { - struct
drm_gem_object *obj = &obj_priv->base; + if (obj->base.write_domain & flush_domains) { + uint32_t old_write_domain = obj->base.write_domain; - if (obj->write_domain & flush_domains) { - uint32_t old_write_domain = obj->write_domain; - - obj->write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_active(obj, ring); /* update the fence lru list */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { + if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; + &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); } trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } } @@ -1912,22 +1891,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, } while (!list_empty(&ring->active_list)) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj_priv = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); - obj_priv->base.write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_inactive(&obj_priv->base); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_inactive(obj); } } void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int i; i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); @@ -1939,23 +1918,23 @@ void i915_gem_reset(struct drm_device *dev) * lost bo to the inactive list. */ while (!list_empty(&dev_priv->mm.flushing_list)) { - obj_priv = list_first_entry(&dev_priv->mm.flushing_list, - struct drm_i915_gem_object, - mm_list); + obj = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + mm_list); - obj_priv->base.write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_inactive(&obj_priv->base); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_inactive(obj); } /* Move everything out of the GPU domains to ensure we do any * necessary invalidation upon reuse. */ - list_for_each_entry(obj_priv, + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { - obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } /* The fence registers are invalidated so clear them out */ @@ -2008,18 +1987,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev, * by the ringbuffer to the flushing/inactive lists as appropriate.
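 *
 * An illustrative sketch of the comparison this loop relies on, matching
 * the i915_seqno_passed() helper in i915_drv.h:
 *
 *	static inline bool
 *	i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 *	{
 *		return (int32_t)(seq1 - seq2) >= 0;
 *	}
 *
 * The signed subtraction keeps the test correct across u32 wraparound;
 * e.g. seq1 = 2 still "passes" seq2 = 0xfffffffe, so retirement keeps
 * making progress after the seqno counter wraps.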
*/ while (!list_empty(&ring->active_list)) { - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj_priv = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); - if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) + if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) break; - obj = &obj_priv->base; - if (obj->write_domain != 0) + if (obj->base.write_domain != 0) i915_gem_object_move_to_flushing(obj); else i915_gem_object_move_to_inactive(obj); @@ -2040,17 +2017,17 @@ i915_gem_retire_requests(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; if (!list_empty(&dev_priv->mm.deferred_free_list)) { - struct drm_i915_gem_object *obj_priv, *tmp; + struct drm_i915_gem_object *obj, *next; /* We must be careful that during unbind() we do not * accidentally infinitely recurse into retire requests. * Currently: * retire -> free -> unbind -> wait -> retire_ring */ - list_for_each_entry_safe(obj_priv, tmp, + list_for_each_entry_safe(obj, next, &dev_priv->mm.deferred_free_list, mm_list) - i915_gem_free_object_tail(&obj_priv->base); + i915_gem_free_object_tail(obj); } i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); @@ -2175,7 +2152,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, static void i915_gem_flush_ring(struct drm_device *dev, - struct drm_file *file_priv, struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains) @@ -2186,7 +2162,6 @@ i915_gem_flush_ring(struct drm_device *dev, static void i915_gem_flush(struct drm_device *dev, - struct drm_file *file_priv, uint32_t invalidate_domains, uint32_t flush_domains, uint32_t flush_rings) @@ -2198,16 +2173,13 @@ i915_gem_flush(struct drm_device *dev, if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { if (flush_rings & RING_RENDER) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->render_ring, + i915_gem_flush_ring(dev, &dev_priv->render_ring, invalidate_domains, flush_domains); if (flush_rings & RING_BSD) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->bsd_ring, + i915_gem_flush_ring(dev, &dev_priv->bsd_ring, invalidate_domains, flush_domains); if (flush_rings & RING_BLT) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->blt_ring, + i915_gem_flush_ring(dev, &dev_priv->blt_ring, invalidate_domains, flush_domains); } } @@ -2217,26 +2189,25 @@ i915_gem_flush(struct drm_device *dev, * safe to unbind from the GTT or access from the CPU. */ static int -i915_gem_object_wait_rendering(struct drm_gem_object *obj, +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_device *dev = obj->base.dev; int ret; /* This function only exists to support waiting for existing rendering, * not for emitting required flushes. */ - BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); + BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); /* If there is rendering queued on the buffer being evicted, wait for * it. */ - if (obj_priv->active) { + if (obj->active) { ret = i915_do_wait_request(dev, - obj_priv->last_rendering_seqno, + obj->last_rendering_seqno, interruptible, - obj_priv->ring); + obj->ring); if (ret) return ret; } @@ -2248,17 +2219,16 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, * Unbinds an object from the GTT aperture.
*/ int -i915_gem_object_unbind(struct drm_gem_object *obj) +i915_gem_object_unbind(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return 0; - if (obj_priv->pin_count != 0) { + if (obj->pin_count != 0) { DRM_ERROR("Attempting to unbind pinned buffer\n"); return -EINVAL; } @@ -2281,27 +2251,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) */ if (ret) { i915_gem_clflush_object(obj); - obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } /* release the fence reg _after_ flushing */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + if (obj->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); i915_gem_gtt_unbind_object(obj); i915_gem_object_put_pages_gtt(obj); - i915_gem_info_remove_gtt(dev_priv, obj_priv); - list_del_init(&obj_priv->mm_list); + i915_gem_info_remove_gtt(dev_priv, obj); + list_del_init(&obj->mm_list); /* Avoid an unnecessary call to unbind on rebind. */ - obj_priv->map_and_fenceable = true; + obj->map_and_fenceable = true; - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; - obj_priv->gtt_offset = 0; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; + obj->gtt_offset = 0; - if (i915_gem_object_is_purgeable(obj_priv)) + if (i915_gem_object_is_purgeable(obj)) i915_gem_object_truncate(obj); trace_i915_gem_object_unbind(obj); @@ -2315,7 +2285,7 @@ static int i915_ring_idle(struct drm_device *dev, if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) return 0; - i915_gem_flush_ring(dev, NULL, ring, + i915_gem_flush_ring(dev, ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); return i915_wait_request(dev, i915_gem_next_request_seqno(dev, ring), @@ -2350,89 +2320,86 @@ i915_gpu_idle(struct drm_device *dev) return 0; } -static void sandybridge_write_fence_reg(struct drm_gem_object *obj) +static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & + val = (uint64_t)((obj->gtt_offset + size - 4096) & 0xfffff000) << 32; - val |= obj_priv->gtt_offset & 0xfffff000; - val |= (uint64_t)((obj_priv->stride / 128) - 1) << + val |= obj->gtt_offset & 0xfffff000; + val |= (uint64_t)((obj->stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT; - if (obj_priv->tiling_mode == I915_TILING_Y) + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); } -static void i965_write_fence_reg(struct drm_gem_object *obj) +static void i965_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + 
int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & + val = (uint64_t)((obj->gtt_offset + size - 4096) & 0xfffff000) << 32; - val |= obj_priv->gtt_offset & 0xfffff000; - val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; - if (obj_priv->tiling_mode == I915_TILING_Y) + val |= obj->gtt_offset & 0xfffff000; + val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); } -static void i915_write_fence_reg(struct drm_gem_object *obj) +static void i915_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); + u32 size = obj->gtt_space->size; uint32_t fence_reg, val, pitch_val; int tile_width; - if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || - (obj_priv->gtt_offset & (size - 1))) { + if ((obj->gtt_offset & ~I915_FENCE_START_MASK) || + (obj->gtt_offset & (size - 1))) { WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", - __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size, - obj_priv->gtt_space->start, obj_priv->gtt_space->size); + __func__, obj->gtt_offset, obj->map_and_fenceable, size, + obj->gtt_space->start, obj->gtt_space->size); return; } - if (obj_priv->tiling_mode == I915_TILING_Y && + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) tile_width = 128; else tile_width = 512; /* Note: pitch better be a power of two tile widths */ - pitch_val = obj_priv->stride / tile_width; + pitch_val = obj->stride / tile_width; pitch_val = ffs(pitch_val) - 1; - if (obj_priv->tiling_mode == I915_TILING_Y && + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); else WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); - val = obj_priv->gtt_offset; - if (obj_priv->tiling_mode == I915_TILING_Y) + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I915_FENCE_SIZE_BITS(size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - fence_reg = obj_priv->fence_reg; + fence_reg = obj->fence_reg; if (fence_reg < 8) fence_reg = FENCE_REG_830_0 + fence_reg * 4; else @@ -2440,30 +2407,29 @@ static void i915_write_fence_reg(struct drm_gem_object *obj) I915_WRITE(fence_reg, val); } -static void i830_write_fence_reg(struct drm_gem_object *obj) +static void i830_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + int regnum = obj->fence_reg; uint32_t val; uint32_t pitch_val; uint32_t fence_size_bits; - if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || - (obj_priv->gtt_offset & (obj->size - 1))) { + if ((obj->gtt_offset & ~I830_FENCE_START_MASK) || + (obj->gtt_offset & (obj->base.size - 1))) { WARN(1, "%s: object 0x%08x not 512K or size aligned\n", - __func__, obj_priv->gtt_offset); + __func__, obj->gtt_offset); return; } - pitch_val = 
obj_priv->stride / 128; + pitch_val = obj->stride / 128; pitch_val = ffs(pitch_val) - 1; WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); - val = obj_priv->gtt_offset; - if (obj_priv->tiling_mode == I915_TILING_Y) + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; fence_size_bits = I830_FENCE_SIZE_BITS(size); WARN_ON(fence_size_bits & ~0x00000f00); @@ -2479,7 +2445,7 @@ static int i915_find_fence_reg(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_fence_reg *reg; - struct drm_i915_gem_object *obj_priv = NULL; + struct drm_i915_gem_object *obj = NULL; int i, avail, ret; /* First try to find a free reg */ @@ -2489,9 +2455,8 @@ static int i915_find_fence_reg(struct drm_device *dev, if (!reg->obj) return i; - obj_priv = to_intel_bo(reg->obj); - if (!obj_priv->pin_count) - avail++; + if (!reg->obj->pin_count) + avail++; } if (avail == 0) @@ -2501,12 +2466,12 @@ static int i915_find_fence_reg(struct drm_device *dev, avail = I915_FENCE_REG_NONE; list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { - obj_priv = to_intel_bo(reg->obj); - if (obj_priv->pin_count) + obj = reg->obj; + if (obj->pin_count) continue; /* found one! */ - avail = obj_priv->fence_reg; + avail = obj->fence_reg; break; } @@ -2516,9 +2481,9 @@ static int i915_find_fence_reg(struct drm_device *dev, * might drop that one, causing a use-after-free in it. So hold a * private reference to obj like the other callers of put_fence_reg * (set_tiling ioctl) do. */ - drm_gem_object_reference(&obj_priv->base); - ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible); - drm_gem_object_unreference(&obj_priv->base); + drm_gem_object_reference(&obj->base); + ret = i915_gem_object_put_fence_reg(obj, interruptible); + drm_gem_object_unreference(&obj->base); if (ret != 0) return ret; @@ -2539,39 +2504,38 @@ static int i915_find_fence_reg(struct drm_device *dev, * and tiling format. */ int -i915_gem_object_get_fence_reg(struct drm_gem_object *obj, +i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, bool interruptible) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_fence_reg *reg = NULL; int ret; /* Just update our place in the LRU if our fence is getting used. 
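 *
 * Worked example for the pre-i965 pitch encoding in
 * i915_write_fence_reg() above (illustrative numbers): an X-tiled bo
 * with a 4096-byte stride on a 512-byte-tile chipset gives
 *
 *	pitch_val = 4096 / 512;		now 8 tile widths
 *	pitch_val = ffs(8) - 1;		now 3, i.e. log2(8)
 *
 * so the register stores the log2 of the pitch in tiles, which is why
 * the stride must be a power-of-two number of tile widths.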
*/ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + if (obj->fence_reg != I915_FENCE_REG_NONE) { + reg = &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); return 0; } - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { case I915_TILING_NONE: WARN(1, "allocating a fence for non-tiled object?\n"); break; case I915_TILING_X: - if (!obj_priv->stride) + if (!obj->stride) return -EINVAL; - WARN((obj_priv->stride & (512 - 1)), + WARN((obj->stride & (512 - 1)), "object 0x%08x is X tiled but has non-512B pitch\n", - obj_priv->gtt_offset); + obj->gtt_offset); break; case I915_TILING_Y: - if (!obj_priv->stride) + if (!obj->stride) return -EINVAL; - WARN((obj_priv->stride & (128 - 1)), + WARN((obj->stride & (128 - 1)), "object 0x%08x is Y tiled but has non-128B pitch\n", - obj_priv->gtt_offset); + obj->gtt_offset); break; } @@ -2579,8 +2543,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, if (ret < 0) return ret; - obj_priv->fence_reg = ret; - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + obj->fence_reg = ret; + reg = &dev_priv->fence_regs[obj->fence_reg]; list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list); reg->obj = obj; @@ -2602,8 +2566,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, } trace_i915_gem_object_get_fence(obj, - obj_priv->fence_reg, - obj_priv->tiling_mode); + obj->fence_reg, + obj->tiling_mode); return 0; } @@ -2613,40 +2577,38 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, * @obj: object to clear * * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj_priv. + * data structures in dev_priv and obj. */ static void -i915_gem_clear_fence_reg(struct drm_gem_object *obj) +i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; + struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg]; uint32_t fence_reg; switch (INTEL_INFO(dev)->gen) { case 6: I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + - (obj_priv->fence_reg * 8), 0); + (obj->fence_reg * 8), 0); break; case 5: case 4: - I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); + I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0); break; case 3: - if (obj_priv->fence_reg >= 8) - fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; + if (obj->fence_reg >= 8) + fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4; else case 2: - fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; + fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4; I915_WRITE(fence_reg, 0); break; } reg->obj = NULL; - obj_priv->fence_reg = I915_FENCE_REG_NONE; + obj->fence_reg = I915_FENCE_REG_NONE; list_del_init(&reg->lru_list); } @@ -2657,18 +2619,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) * @obj: object to clear * @bool: whether the wait upon the fence is interruptible * * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj_priv. + * data structures in dev_priv and obj.
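 *
 * (A note on the switch in i915_gem_clear_fence_reg() above: the
 * "else case 2:" construct is legal C, if unusual; gen3 picks between
 * its two register banks and then shares the gen2 write. A hypothetical,
 * more conventional rewrite of the same control flow:
 *
 *	if (IS_GEN3(dev) && obj->fence_reg >= 8)
 *		fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4;
 *	else
 *		fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4;
 *	I915_WRITE(fence_reg, 0);
 *
 * shown only to unpack what the fall-through does.)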
*/ int -i915_gem_object_put_fence_reg(struct drm_gem_object *obj, +i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, bool interruptible) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_fence_reg *reg; - if (obj_priv->fence_reg == I915_FENCE_REG_NONE) + if (obj->fence_reg == I915_FENCE_REG_NONE) return 0; /* If we've changed tiling, GTT-mappings of the object @@ -2681,7 +2642,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, * therefore we must wait for any outstanding access to complete * before clearing the fence. */ - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + reg = &dev_priv->fence_regs[obj->fence_reg]; if (reg->gpu) { int ret; @@ -2706,27 +2667,26 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, * Finds free space in the GTT aperture and binds the object there. */ static int -i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, +i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_mm_node *free_space; gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; int ret; - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to bind a purgeable object\n"); return -EINVAL; } - fence_size = i915_gem_get_gtt_size(obj_priv); - fence_alignment = i915_gem_get_gtt_alignment(obj_priv); - unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv); + fence_size = i915_gem_get_gtt_size(obj); + fence_alignment = i915_gem_get_gtt_alignment(obj); + unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj); if (alignment == 0) alignment = map_and_fenceable ? fence_alignment : @@ -2736,12 +2696,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, return -EINVAL; } - size = map_and_fenceable ? fence_size : obj->size; + size = map_and_fenceable ? fence_size : obj->base.size; /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->size > + if (obj->base.size > (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { DRM_ERROR("Attempting to bind an object larger than the aperture\n"); return -E2BIG; @@ -2760,16 +2720,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, if (free_space != NULL) { if (map_and_fenceable) - obj_priv->gtt_space = + obj->gtt_space = drm_mm_get_block_range_generic(free_space, size, alignment, 0, dev_priv->mm.gtt_mappable_end, 0); else - obj_priv->gtt_space = + obj->gtt_space = drm_mm_get_block(free_space, size, alignment); } - if (obj_priv->gtt_space == NULL) { + if (obj->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. 
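 *
 * Schematically, this function is an allocate-or-evict retry loop; an
 * illustrative paraphrase of the control flow around the search_free
 * label (not new code):
 *
 *	for (;;) {
 *		free_space = drm_mm_search_free(...);
 *		if (free_space != NULL)
 *			break;
 *		ret = i915_gem_evict_something(dev, size, alignment,
 *					       map_and_fenceable);
 *		if (ret)
 *			return ret;
 *	}
 *
 * i.e. keep evicting until either a suitable hole appears or eviction
 * itself fails; the same retry is applied again below when getting the
 * backing pages or binding into the aperture fails.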
*/ @@ -2783,8 +2743,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ret = i915_gem_object_get_pages_gtt(obj, gfpmask); if (ret) { - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ @@ -2810,8 +2770,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ret = i915_gem_gtt_bind_object(obj); if (ret) { i915_gem_object_put_pages_gtt(obj); - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; ret = i915_gem_evict_something(dev, size, alignment, map_and_fenceable); @@ -2821,65 +2781,61 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, goto search_free; } - obj_priv->gtt_offset = obj_priv->gtt_space->start; + obj->gtt_offset = obj->gtt_space->start; /* keep track of bounds object by adding it to the inactive list */ - list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_add_gtt(dev_priv, obj_priv); + list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + i915_gem_info_add_gtt(dev_priv, obj); /* Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache */ - BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); - BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable); + trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); fenceable = - obj_priv->gtt_space->size == fence_size && - (obj_priv->gtt_space->start & (fence_alignment -1)) == 0; + obj->gtt_space->size == fence_size && + (obj->gtt_space->start & (fence_alignment -1)) == 0; mappable = - obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; + obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; - obj_priv->map_and_fenceable = mappable && fenceable; + obj->map_and_fenceable = mappable && fenceable; return 0; } void -i915_gem_clflush_object(struct drm_gem_object *obj) +i915_gem_clflush_object(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ - if (obj_priv->pages == NULL) + if (obj->pages == NULL) return; trace_i915_gem_object_clflush(obj); - drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); + drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); } /** Flushes any GPU write domain for the object if it's dirty. */ static int -i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, +i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, bool pipelined) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; - if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) + if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) return 0; /* Queue the GPU write cache flushing we need. 
*/ - i915_gem_flush_ring(dev, NULL, - to_intel_bo(obj)->ring, - 0, obj->write_domain); - BUG_ON(obj->write_domain); + i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); + BUG_ON(obj->base.write_domain); if (pipelined) return 0; @@ -2889,11 +2845,11 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, /** Flushes the GTT write domain for the object if it's dirty. */ static void -i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) { uint32_t old_write_domain; - if (obj->write_domain != I915_GEM_DOMAIN_GTT) + if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) return; /* No actual flushing is required for the GTT write domain. Writes @@ -2902,30 +2858,30 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) */ i915_gem_release_mmap(obj); - old_write_domain = obj->write_domain; - obj->write_domain = 0; + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } /** Flushes the CPU write domain for the object if it's dirty. */ static void -i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) { uint32_t old_write_domain; - if (obj->write_domain != I915_GEM_DOMAIN_CPU) + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; i915_gem_clflush_object(obj); intel_gtt_chipset_flush(); - old_write_domain = obj->write_domain; - obj->write_domain = 0; + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } @@ -2936,14 +2892,13 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) * flushes to occur. */ int -i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain, old_read_domains; int ret; /* Not valid to be called on unbound objects. */ - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return -EINVAL; ret = i915_gem_object_flush_gpu_write_domain(obj, false); @@ -2958,18 +2913,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) return ret; } - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); - obj->read_domains |= I915_GEM_DOMAIN_GTT; + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; if (write) { - obj->read_domains = I915_GEM_DOMAIN_GTT; - obj->write_domain = I915_GEM_DOMAIN_GTT; - obj_priv->dirty = 1; + obj->base.read_domains = I915_GEM_DOMAIN_GTT; + obj->base.write_domain = I915_GEM_DOMAIN_GTT; + obj->dirty = 1; } trace_i915_gem_object_change_domain(obj, @@ -2984,15 +2939,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) * wait, as in modesetting process we're not supposed to be interrupted. 
*/ int -i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, +i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, bool pipelined) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; int ret; /* Not valid to be called on unbound objects. */ - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return -EINVAL; ret = i915_gem_object_flush_gpu_write_domain(obj, true); @@ -3008,12 +2962,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, i915_gem_object_flush_cpu_write_domain(obj); - old_read_domains = obj->read_domains; - obj->read_domains |= I915_GEM_DOMAIN_GTT; + old_read_domains = obj->base.read_domains; + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; trace_i915_gem_object_change_domain(obj, old_read_domains, - obj->write_domain); + obj->base.write_domain); return 0; } @@ -3026,10 +2980,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, return 0; if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) - i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, + i915_gem_flush_ring(obj->base.dev, obj->ring, 0, obj->base.write_domain); - return i915_gem_object_wait_rendering(&obj->base, interruptible); + return i915_gem_object_wait_rendering(obj, interruptible); } /** @@ -3039,7 +2993,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, * flushes to occur. */ static int -i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write) { uint32_t old_write_domain, old_read_domains; int ret; @@ -3061,27 +3015,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) return ret; } - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* Flush the CPU cache if it's still invalid. */ - if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj); - obj->read_domains |= I915_GEM_DOMAIN_CPU; + obj->base.read_domains |= I915_GEM_DOMAIN_CPU; } /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); /* If we're writing through the CPU, then the GPU read domains will * need to be invalidated at next use. 
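 *
 * For example (an illustrative trace through the bookkeeping): after a
 * CPU write, both read_domains and write_domain collapse to
 * I915_GEM_DOMAIN_CPU; if the next execbuffer names this bo with
 * pending_read_domains = I915_GEM_DOMAIN_RENDER, the logic in
 * i915_gem_object_set_to_gpu_domain() later in this file computes
 *
 *	flush_domains      |= I915_GEM_DOMAIN_CPU;
 *	invalidate_domains |= I915_GEM_DOMAIN_RENDER;
 *
 * and the CPU bit in that pair is what triggers the clflush before the
 * GPU is allowed to read the pages.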
*/ if (write) { - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; } trace_i915_gem_object_change_domain(obj, @@ -3203,20 +3157,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * drm_agp_chipset_flush */ static void -i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, +i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, struct change_domains *cd) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - uint32_t invalidate_domains = 0; - uint32_t flush_domains = 0; + uint32_t invalidate_domains = 0, flush_domains = 0; /* * If the object isn't moving to a new write domain, * let the object stay in multiple read domains */ - if (obj->pending_write_domain == 0) - obj->pending_read_domains |= obj->read_domains; + if (obj->base.pending_write_domain == 0) + obj->base.pending_read_domains |= obj->base.read_domains; /* * Flush the current write domain if @@ -3224,18 +3176,18 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * any read domains which differ from the old * write domain */ - if (obj->write_domain && - (obj->write_domain != obj->pending_read_domains || - obj_priv->ring != ring)) { - flush_domains |= obj->write_domain; + if (obj->base.write_domain && + (obj->base.write_domain != obj->base.pending_read_domains || + obj->ring != ring)) { + flush_domains |= obj->base.write_domain; invalidate_domains |= - obj->pending_read_domains & ~obj->write_domain; + obj->base.pending_read_domains & ~obj->base.write_domain; } /* * Invalidate any read caches which may have * stale data. That is, any new read domains. */ - invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; + invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) i915_gem_clflush_object(obj); @@ -3249,13 +3201,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * write_domains). So if we have a current write domain that we * aren't changing, set pending_write_domain to that. */ - if (flush_domains == 0 && obj->pending_write_domain == 0) - obj->pending_write_domain = obj->write_domain; + if (flush_domains == 0 && obj->base.pending_write_domain == 0) + obj->base.pending_write_domain = obj->base.write_domain; cd->invalidate_domains |= invalidate_domains; cd->flush_domains |= flush_domains; if (flush_domains & I915_GEM_GPU_DOMAINS) - cd->flush_rings |= obj_priv->ring->id; + cd->flush_rings |= obj->ring->id; if (invalidate_domains & I915_GEM_GPU_DOMAINS) cd->flush_rings |= ring->id; } @@ -3267,30 +3219,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). */ static void -i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) +i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - - if (!obj_priv->page_cpu_valid) + if (!obj->page_cpu_valid) return; /* If we're partially in the CPU read domain, finish moving it in. 
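 *
 * For example (illustrative sizes, 4KiB pages): a 10000-byte object
 * covers pages 0..2, and the loop bound below computes exactly that,
 *
 *	(10000 - 1) / 4096 = 2
 *
 * so page_cpu_valid holds one byte per page, three here, and only the
 * pages not already marked valid get clflushed before the whole-object
 * domain flag takes over.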
*/ - if (obj->read_domains & I915_GEM_DOMAIN_CPU) { + if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { int i; - for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { - if (obj_priv->page_cpu_valid[i]) + for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { + if (obj->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->pages + i, 1); + drm_clflush_pages(obj->pages + i, 1); } } /* Free the page_cpu_valid mappings which are now stale, whether * or not we've got I915_GEM_DOMAIN_CPU. */ - kfree(obj_priv->page_cpu_valid); - obj_priv->page_cpu_valid = NULL; + kfree(obj->page_cpu_valid); + obj->page_cpu_valid = NULL; } /** @@ -3306,14 +3256,13 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) * flushes to occur. */ static int -i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, +i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; int i, ret; - if (offset == 0 && size == obj->size) + if (offset == 0 && size == obj->base.size) return i915_gem_object_set_to_cpu_domain(obj, 0); ret = i915_gem_object_flush_gpu_write_domain(obj, false); @@ -3322,45 +3271,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, i915_gem_object_flush_gtt_write_domain(obj); /* If we're already fully in the CPU read domain, we're done. */ - if (obj_priv->page_cpu_valid == NULL && - (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) + if (obj->page_cpu_valid == NULL && + (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) return 0; /* Otherwise, create/clear the per-page CPU read domain flag if we're * newly adding I915_GEM_DOMAIN_CPU */ - if (obj_priv->page_cpu_valid == NULL) { - obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, - GFP_KERNEL); - if (obj_priv->page_cpu_valid == NULL) + if (obj->page_cpu_valid == NULL) { + obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, + GFP_KERNEL); + if (obj->page_cpu_valid == NULL) return -ENOMEM; - } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) - memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); + } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) + memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); /* Flush the cache on any pages that are still invalid from the CPU's * perspective. */ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { - if (obj_priv->page_cpu_valid[i]) + if (obj->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->pages + i, 1); + drm_clflush_pages(obj->pages + i, 1); - obj_priv->page_cpu_valid[i] = 1; + obj->page_cpu_valid[i] = 1; } /* It should now be out of any other write domains, and we can update * the domain values for our changes. 
*/ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); - old_read_domains = obj->read_domains; - obj->read_domains |= I915_GEM_DOMAIN_CPU; + old_read_domains = obj->base.read_domains; + obj->base.read_domains |= I915_GEM_DOMAIN_CPU; trace_i915_gem_object_change_domain(obj, old_read_domains, - obj->write_domain); + obj->base.write_domain); return 0; } @@ -3490,7 +3439,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t __iomem *reloc_entry; void __iomem *reloc_page; - ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) goto err; @@ -3564,14 +3513,14 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate(struct drm_device *dev, struct drm_file *file, - struct drm_gem_object **object_list, + struct drm_i915_gem_object **object_list, struct drm_i915_gem_exec_object2 *exec_list, int count) { int i, ret; for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + struct drm_i915_gem_object *obj = object_list[i]; obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; ret = i915_gem_execbuffer_relocate_object(obj, file, @@ -3586,7 +3535,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, static int i915_gem_execbuffer_reserve(struct drm_device *dev, struct drm_file *file, - struct drm_gem_object **object_list, + struct drm_i915_gem_object **object_list, struct drm_i915_gem_exec_object2 *exec_list, int count) { @@ -3599,7 +3548,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ret = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + struct drm_i915_gem_object *obj = object_list[i]; bool need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; @@ -3610,12 +3559,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, /* Check fence reg constraints and rebind if necessary */ if (need_mappable && !obj->map_and_fenceable) { - ret = i915_gem_object_unbind(&obj->base); + ret = i915_gem_object_unbind(obj); if (ret) break; } - ret = i915_gem_object_pin(&obj->base, + ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); if (ret) @@ -3626,9 +3575,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, * to properly handle blits to/from tiled surfaces. 
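 *
 * Restating the policy of this loop with its own condition
 * (illustrative, nothing new): a fence is only reserved when the entry
 * asks for one and the object is actually tiled,
 *
 *	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 *		     obj->tiling_mode != I915_TILING_NONE;
 *
 * and since fence registers only cover the mappable aperture, such an
 * object must also be bound map_and_fenceable, hence the unbind and
 * rebind above when the current placement does not qualify.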
*/ if (need_fence) { - ret = i915_gem_object_get_fence_reg(&obj->base, true); + ret = i915_gem_object_get_fence_reg(obj, true); if (ret) { - i915_gem_object_unpin(&obj->base); + i915_gem_object_unpin(obj); break; } @@ -3658,17 +3607,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, - struct drm_gem_object **object_list, + struct drm_i915_gem_object **object_list, struct drm_i915_gem_exec_object2 *exec_list, int count) { struct drm_i915_gem_relocation_entry *reloc; int i, total, ret; - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); - obj->in_execbuffer = false; - } + for (i = 0; i < count; i++) + object_list[i]->in_execbuffer = false; mutex_unlock(&dev->struct_mutex); @@ -3713,7 +3660,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, total = 0; for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + struct drm_i915_gem_object *obj = object_list[i]; obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; ret = i915_gem_execbuffer_relocate_object_slow(obj, file, @@ -3740,7 +3687,7 @@ static int i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring, - struct drm_gem_object **objects, + struct drm_i915_gem_object **objects, int count) { struct change_domains cd; @@ -3759,17 +3706,17 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, cd.invalidate_domains, cd.flush_domains); #endif - i915_gem_flush(dev, file, + i915_gem_flush(dev, cd.invalidate_domains, cd.flush_domains, cd.flush_rings); } for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); + struct drm_i915_gem_object *obj = objects[i]; /* XXX replace with semaphores */ if (obj->ring && ring != obj->ring) { - ret = i915_gem_object_wait_rendering(&obj->base, true); + ret = i915_gem_object_wait_rendering(obj, true); if (ret) return ret; } @@ -3891,8 +3838,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec_list) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object **object_list = NULL; - struct drm_gem_object *batch_obj; + struct drm_i915_gem_object **object_list = NULL; + struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct drm_i915_gem_request *request = NULL; int ret, i, flips; @@ -3987,29 +3934,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Look up object handles */ for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - object_list[i] = drm_gem_object_lookup(dev, file, - exec_list[i].handle); - if (object_list[i] == NULL) { + obj = to_intel_bo(drm_gem_object_lookup(dev, file, + exec_list[i].handle)); + if (obj == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; + args->buffer_count = i; ret = -ENOENT; goto err; } + object_list[i] = obj; - obj_priv = to_intel_bo(object_list[i]); - if (obj_priv->in_execbuffer) { + if (obj->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", - object_list[i]); + obj); /* prevent error path from reading uninitialized data */ args->buffer_count = i + 1; ret = -EINVAL; goto err; } - obj_priv->in_execbuffer = true; + obj->in_execbuffer = true; } /* Move the
objects en-masse into the GTT, evicting if necessary. */ @@ -4037,15 +3984,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Set the pending read domains for the batch buffer to COMMAND */ batch_obj = object_list[args->buffer_count-1]; - if (batch_obj->pending_write_domain) { + if (batch_obj->base.pending_write_domain) { DRM_ERROR("Attempting to use self-modifying batch buffer\n"); ret = -EINVAL; goto err; } - batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; /* Sanity check the batch buffer */ - exec_offset = to_intel_bo(batch_obj)->gtt_offset; + exec_offset = batch_obj->gtt_offset; ret = i915_gem_check_execbuffer(args, exec_offset); if (ret != 0) { DRM_ERROR("execbuf with invalid offset/length\n"); @@ -4077,8 +4024,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ flips = 0; for (i = 0; i < args->buffer_count; i++) { - if (object_list[i]->write_domain) - flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); + if (object_list[i]->base.write_domain) + flips |= atomic_read(&object_list[i]->pending_flip); } if (flips) { int plane, flip_mask; @@ -4110,23 +4057,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; + struct drm_i915_gem_object *obj = object_list[i]; - obj->read_domains = obj->pending_read_domains; - obj->write_domain = obj->pending_write_domain; + obj->base.read_domains = obj->base.pending_read_domains; + obj->base.write_domain = obj->base.pending_write_domain; i915_gem_object_move_to_active(obj, ring); - if (obj->write_domain) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - obj_priv->dirty = 1; - list_move_tail(&obj_priv->gpu_write_list, + if (obj->base.write_domain) { + obj->dirty = 1; + list_move_tail(&obj->gpu_write_list, &ring->gpu_write_list); intel_mark_busy(dev, obj); } trace_i915_gem_object_change_domain(obj, - obj->read_domains, - obj->write_domain); + obj->base.read_domains, + obj->base.write_domain); } /* @@ -4142,11 +4088,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, err: for (i = 0; i < args->buffer_count; i++) { - if (object_list[i] == NULL) - break; - - to_intel_bo(object_list[i])->in_execbuffer = false; - drm_gem_object_unreference(object_list[i]); + object_list[i]->in_execbuffer = false; + drm_gem_object_unreference(&object_list[i]->base); } mutex_unlock(&dev->struct_mutex); @@ -4165,7 +4108,7 @@ pre_mutex_err: */ int i915_gem_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer2 exec2; @@ -4227,7 +4170,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2.cliprects_ptr = args->cliprects_ptr; exec2.flags = I915_EXEC_RENDER; - ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
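* (Userspace typically caches these returned offsets so a later submission can pass matching presumed_offset values and avoid relocation rewrites; an illustrative note, not a guarantee of any particular client's behaviour.)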
*/ for (i = 0; i < args->buffer_count; i++) @@ -4252,7 +4195,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, int i915_gem_execbuffer2(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list = NULL; @@ -4285,7 +4228,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -EFAULT; } - ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ ret = copy_to_user((struct drm_i915_relocation_entry __user *) @@ -4305,109 +4248,106 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, } int -i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, +i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, bool map_and_fenceable) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; - BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); BUG_ON(map_and_fenceable && !map_and_fenceable); WARN_ON(i915_verify_lists(dev)); - if (obj_priv->gtt_space != NULL) { - if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || - (map_and_fenceable && !obj_priv->map_and_fenceable)) { - WARN(obj_priv->pin_count, + if (obj->gtt_space != NULL) { + if ((alignment && obj->gtt_offset & (alignment - 1)) || + (map_and_fenceable && !obj->map_and_fenceable)) { + WARN(obj->pin_count, "bo is already pinned with incorrect alignment:" " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - obj_priv->gtt_offset, alignment, + obj->gtt_offset, alignment, map_and_fenceable, - obj_priv->map_and_fenceable); + obj->map_and_fenceable); ret = i915_gem_object_unbind(obj); if (ret) return ret; } } - if (obj_priv->gtt_space == NULL) { + if (obj->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment, map_and_fenceable); if (ret) return ret; } - if (obj_priv->pin_count++ == 0) { - i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable); - if (!obj_priv->active) - list_move_tail(&obj_priv->mm_list, + if (obj->pin_count++ == 0) { + i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable); + if (!obj->active) + list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); } - BUG_ON(!obj_priv->pin_mappable && map_and_fenceable); + BUG_ON(!obj->pin_mappable && map_and_fenceable); WARN_ON(i915_verify_lists(dev)); return 0; } void -i915_gem_object_unpin(struct drm_gem_object *obj) +i915_gem_object_unpin(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); WARN_ON(i915_verify_lists(dev)); - BUG_ON(obj_priv->pin_count == 0); - BUG_ON(obj_priv->gtt_space == NULL); + BUG_ON(obj->pin_count == 0); + BUG_ON(obj->gtt_space == NULL); - if (--obj_priv->pin_count == 0) { - if (!obj_priv->active) - list_move_tail(&obj_priv->mm_list, + if (--obj->pin_count == 0) { + if (!obj->active) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_remove_pin(dev_priv, obj_priv); + i915_gem_info_remove_pin(dev_priv, obj); } WARN_ON(i915_verify_lists(dev)); } int 
i915_gem_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pin *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); ret = -EINVAL; goto out; } - if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { + if (obj->pin_filp != NULL && obj->pin_filp != file) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } - obj_priv->user_pin_count++; - obj_priv->pin_filp = file_priv; - if (obj_priv->user_pin_count == 1) { + obj->user_pin_count++; + obj->pin_filp = file; + if (obj->user_pin_count == 1) { ret = i915_gem_object_pin(obj, args->alignment, true); if (ret) goto out; @@ -4417,9 +4357,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, * as the X server doesn't manage domains yet */ i915_gem_object_flush_cpu_write_domain(obj); - args->offset = obj_priv->gtt_offset; + args->offset = obj->gtt_offset; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4427,38 +4367,36 @@ unlock: int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pin *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->pin_filp != file_priv) { + if (obj->pin_filp != file) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } - obj_priv->user_pin_count--; - if (obj_priv->user_pin_count == 0) { - obj_priv->pin_filp = NULL; + obj->user_pin_count--; + if (obj->user_pin_count == 0) { + obj->pin_filp = NULL; i915_gem_object_unpin(obj); } out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4466,52 +4404,49 @@ unlock: int i915_gem_busy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_busy *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Count all active objects as busy, even if they are currently not used * by the gpu. Users of this interface expect objects to eventually * become non-busy without any further actions, therefore emit any * necessary flushes here. 
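* (Illustrative only, a hypothetical client draining its buffers might poll:
*     struct drm_i915_gem_busy busy = { .handle = handle };
*     while (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && busy.busy)
*         usleep(1000);
* and relies on the flush emitted below to eventually make progress.)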
*/ - args->busy = obj_priv->active; + args->busy = obj->active; if (args->busy) { /* Unconditionally flush objects, even when the gpu still uses this * object. Userspace calling this function indicates that it wants to * use this buffer rather sooner than later, so issuing the required * flush earlier is beneficial. */ - if (obj->write_domain & I915_GEM_GPU_DOMAINS) - i915_gem_flush_ring(dev, file_priv, - obj_priv->ring, - 0, obj->write_domain); + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(dev, obj->ring, + 0, obj->base.write_domain); /* Update the active list for the hardware's current position. * Otherwise this only updates on a delayed timer or when irqs * are actually unmasked, and our working set ends up being * larger than required. */ - i915_gem_retire_requests_ring(dev, obj_priv->ring); + i915_gem_retire_requests_ring(dev, obj->ring); - args->busy = obj_priv->active; + args->busy = obj->active; } - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4529,8 +4464,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_i915_gem_madvise *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; switch (args->madv) { @@ -4545,37 +4479,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->pin_count) { + if (obj->pin_count) { ret = -EINVAL; goto out; } - if (obj_priv->madv != __I915_MADV_PURGED) - obj_priv->madv = args->madv; + if (obj->madv != __I915_MADV_PURGED) + obj->madv = args->madv; /* if the object is no longer bound, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj_priv) && - obj_priv->gtt_space == NULL) + if (i915_gem_object_is_purgeable(obj) && + obj->gtt_space == NULL) i915_gem_object_truncate(obj); - args->retained = obj_priv->madv != __I915_MADV_PURGED; + args->retained = obj->madv != __I915_MADV_PURGED; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; } -struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, - size_t size) +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; @@ -4605,7 +4538,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, /* Avoid an unnecessary call to unbind on the first bind. 
*/ obj->map_and_fenceable = true; - return &obj->base; + return obj; } int i915_gem_init_object(struct drm_gem_object *obj) @@ -4615,42 +4548,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) return 0; } -static void i915_gem_free_object_tail(struct drm_gem_object *obj) +static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; ret = i915_gem_object_unbind(obj); if (ret == -ERESTARTSYS) { - list_move(&obj_priv->mm_list, + list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list); return; } - if (obj->map_list.map) + if (obj->base.map_list.map) i915_gem_free_mmap_offset(obj); - drm_gem_object_release(obj); - i915_gem_info_remove_obj(dev_priv, obj->size); + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev_priv, obj->base.size); - kfree(obj_priv->page_cpu_valid); - kfree(obj_priv->bit_17); - kfree(obj_priv); + kfree(obj->page_cpu_valid); + kfree(obj->bit_17); + kfree(obj); } -void i915_gem_free_object(struct drm_gem_object *obj) +void i915_gem_free_object(struct drm_gem_object *gem_obj) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + struct drm_device *dev = obj->base.dev; trace_i915_gem_object_destroy(obj); - while (obj_priv->pin_count > 0) + while (obj->pin_count > 0) i915_gem_object_unpin(obj); - if (obj_priv->phys_obj) + if (obj->phys_obj) i915_gem_detach_phys_object(dev, obj); i915_gem_free_object_tail(obj); @@ -4710,8 +4642,7 @@ static int i915_gem_init_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; obj = i915_gem_alloc_object(dev, 4096); @@ -4720,15 +4651,14 @@ i915_gem_init_pipe_control(struct drm_device *dev) ret = -ENOMEM; goto err; } - obj_priv = to_intel_bo(obj); - obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + obj->agp_type = AGP_USER_CACHED_MEMORY; ret = i915_gem_object_pin(obj, 4096, true); if (ret) goto err_unref; - dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; - dev_priv->seqno_page = kmap(obj_priv->pages[0]); + dev_priv->seqno_gfx_addr = obj->gtt_offset; + dev_priv->seqno_page = kmap(obj->pages[0]); if (dev_priv->seqno_page == NULL) goto err_unpin; @@ -4740,7 +4670,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); err: return ret; } @@ -4750,14 +4680,12 @@ static void i915_gem_cleanup_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; obj = dev_priv->seqno_obj; - obj_priv = to_intel_bo(obj); - kunmap(obj_priv->pages[0]); + kunmap(obj->pages[0]); i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); dev_priv->seqno_obj = NULL; dev_priv->seqno_page = NULL; @@ -5035,20 +4963,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) } void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct 
drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; char *vaddr; int i; int page_count; - if (!obj_priv->phys_obj) + if (!obj->phys_obj) return; - vaddr = obj_priv->phys_obj->handle->vaddr; - - page_count = obj->size / PAGE_SIZE; + vaddr = obj->phys_obj->handle->vaddr; + page_count = obj->base.size / PAGE_SIZE; for (i = 0; i < page_count; i++) { struct page *page = read_cache_page_gfp(mapping, i, GFP_HIGHUSER | __GFP_RECLAIMABLE); @@ -5066,19 +4992,18 @@ void i915_gem_detach_phys_object(struct drm_device *dev, } intel_gtt_chipset_flush(); - obj_priv->phys_obj->cur_obj = NULL; - obj_priv->phys_obj = NULL; + obj->phys_obj->cur_obj = NULL; + obj->phys_obj = NULL; } int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, int id, int align) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; int ret = 0; int page_count; int i; @@ -5086,10 +5011,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, if (id > I915_MAX_PHYS_OBJECT) return -EINVAL; - obj_priv = to_intel_bo(obj); - - if (obj_priv->phys_obj) { - if (obj_priv->phys_obj->id == id) + if (obj->phys_obj) { + if (obj->phys_obj->id == id) return 0; i915_gem_detach_phys_object(dev, obj); } @@ -5097,18 +5020,19 @@ i915_gem_attach_phys_object(struct drm_device *dev, /* create a new object */ if (!dev_priv->mm.phys_objs[id - 1]) { ret = i915_gem_init_phys_object(dev, id, - obj->size, align); + obj->base.size, align); if (ret) { - DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); + DRM_ERROR("failed to init phys object %d size: %zu\n", + id, obj->base.size); return ret; } } /* bind to the object */ - obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; - obj_priv->phys_obj->cur_obj = obj; + obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; + obj->phys_obj->cur_obj = obj; - page_count = obj->size / PAGE_SIZE; + page_count = obj->base.size / PAGE_SIZE; for (i = 0; i < page_count; i++) { struct page *page; @@ -5120,7 +5044,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, return PTR_ERR(page); src = kmap_atomic(page); - dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); + dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(src); @@ -5132,16 +5056,14 @@ i915_gem_attach_phys_object(struct drm_device *dev, } static int -i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; + void *vaddr = obj->phys_obj->handle->vaddr + args->offset; char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; - DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { unsigned long unwritten; @@ -5228,7 +5150,7 @@ rescan: &dev_priv->mm.inactive_list, mm_list) { if (i915_gem_object_is_purgeable(obj)) { - i915_gem_object_unbind(&obj->base); + i915_gem_object_unbind(obj); if (--nr_to_scan == 0) break; } @@ -5240,7 +5162,7 @@ rescan: &dev_priv->mm.inactive_list, mm_list) { if 
(nr_to_scan) { - i915_gem_object_unbind(&obj->base); + i915_gem_object_unbind(obj); nr_to_scan--; } else cnt++; diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 48644b840a8d..29d014c48ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, } void -i915_gem_dump_object(struct drm_gem_object *obj, int len, +i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page; - DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); + DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset); for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { int page_len, chunk, chunk_len; @@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, chunk_len = page_len - chunk; if (chunk_len > 128) chunk_len = 128; - i915_gem_dump_page(obj_priv->pages[page], + i915_gem_dump_page(obj->pages[page], chunk, chunk + chunk_len, - obj_priv->gtt_offset + + obj->gtt_offset + page * PAGE_SIZE, mark); } @@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, #if WATCH_COHERENCY void -i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) +i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_device *dev = obj->base.dev; int page; uint32_t *gtt_mapping; uint32_t *backing_map = NULL; int bad_count = 0; DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", - __func__, obj, obj_priv->gtt_offset, handle, + __func__, obj, obj->gtt_offset, handle, obj->size / 1024); - gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, - obj->size); + gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); if (gtt_mapping == NULL) { DRM_ERROR("failed to map GTT space\n"); return; @@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) for (page = 0; page < obj->size / PAGE_SIZE; page++) { int i; - backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); + backing_map = kmap_atomic(obj->pages[page], KM_USER0); if (backing_map == NULL) { DRM_ERROR("failed to map backing page\n"); @@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) if (cpuval != gttval) { DRM_INFO("incoherent CPU vs GPU at 0x%08x: " "0x%08x vs 0x%08x\n", - (int)(obj_priv->gtt_offset + + (int)(obj->gtt_offset + page * PAGE_SIZE + i * 4), cpuval, gttval); if (bad_count++ >= 8) { diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3f6f336bbb4d..03e15d37b550 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -32,12 +32,11 @@ #include "i915_drm.h" static bool -mark_free(struct drm_i915_gem_object *obj_priv, - struct list_head *unwind) +mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) { - list_add(&obj_priv->evict_list, unwind); - drm_gem_object_reference(&obj_priv->base); - return drm_mm_scan_add_block(obj_priv->gtt_space); + list_add(&obj->evict_list, unwind); + drm_gem_object_reference(&obj->base); + return drm_mm_scan_add_block(obj->gtt_space); } int @@ -46,7 +45,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, { drm_i915_private_t 
*dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret = 0; i915_gem_retire_requests(dev); @@ -96,42 +95,42 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { - if (mark_free(obj_priv, &unwind_list)) + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { + if (mark_free(obj, &unwind_list)) goto found; } /* Now merge in the soon-to-be-expired objects... */ - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { /* Does the object require an outstanding flush? */ - if (obj_priv->base.write_domain || obj_priv->pin_count) + if (obj->base.write_domain || obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } /* Finally add anything with a pending flush (in order of retirement) */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { - if (obj_priv->pin_count) + list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { + if (obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - if (! obj_priv->base.write_domain || obj_priv->pin_count) + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (! obj->base.write_domain || obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } /* Nothing found, clean up and bail out! */ - list_for_each_entry(obj_priv, &unwind_list, evict_list) { - ret = drm_mm_scan_remove_block(obj_priv->gtt_space); + list_for_each_entry(obj, &unwind_list, evict_list) { + ret = drm_mm_scan_remove_block(obj->gtt_space); BUG_ON(ret); - drm_gem_object_unreference(&obj_priv->base); + drm_gem_object_unreference(&obj->base); } /* We expect the caller to unpin, evict all and try again, or give up. @@ -145,26 +144,26 @@ found: * temporary list. 
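* (Each object moved here still holds the reference taken in mark_free(); the two drain loops below drop that reference as the lists empty, so no reference leaks from the scan.)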
*/ INIT_LIST_HEAD(&eviction_list); while (!list_empty(&unwind_list)) { - obj_priv = list_first_entry(&unwind_list, - struct drm_i915_gem_object, - evict_list); - if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { - list_move(&obj_priv->evict_list, &eviction_list); + obj = list_first_entry(&unwind_list, + struct drm_i915_gem_object, + evict_list); + if (drm_mm_scan_remove_block(obj->gtt_space)) { + list_move(&obj->evict_list, &eviction_list); continue; } - list_del(&obj_priv->evict_list); - drm_gem_object_unreference(&obj_priv->base); + list_del(&obj->evict_list); + drm_gem_object_unreference(&obj->base); } /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { - obj_priv = list_first_entry(&eviction_list, - struct drm_i915_gem_object, - evict_list); + obj = list_first_entry(&eviction_list, + struct drm_i915_gem_object, + evict_list); if (ret == 0) - ret = i915_gem_object_unbind(&obj_priv->base); - list_del(&obj_priv->evict_list); - drm_gem_object_unreference(&obj_priv->base); + ret = i915_gem_object_unbind(obj); + list_del(&obj->evict_list); + drm_gem_object_unreference(&obj->base); } return ret; @@ -203,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) { if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { - int ret = i915_gem_object_unbind(&obj->base); + int ret = i915_gem_object_unbind(obj); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0b34a1aee9b6..71c2b0f3747b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -32,71 +32,67 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - list_for_each_entry(obj_priv, - &dev_priv->mm.gtt_list, - gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { if (dev_priv->mm.gtt->needs_dmar) { - BUG_ON(!obj_priv->sg_list); + BUG_ON(!obj->sg_list); - intel_gtt_insert_sg_entries(obj_priv->sg_list, - obj_priv->num_sg, - obj_priv->gtt_space->start + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, - obj_priv->agp_type); + obj->agp_type); } else - intel_gtt_insert_pages(obj_priv->gtt_space->start + intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, - obj_priv->base.size >> PAGE_SHIFT, - obj_priv->pages, - obj_priv->agp_type); + obj->base.size >> PAGE_SHIFT, + obj->pages, + obj->agp_type); } /* Be paranoid and flush the chipset cache. 
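* (The PTE writes above may still sit in the chipset's posted-write buffer; flushing here makes the restored mappings visible to the GPU before it is used again.)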
*/ intel_gtt_chipset_flush(); } -int i915_gem_gtt_bind_object(struct drm_gem_object *obj) +int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; if (dev_priv->mm.gtt->needs_dmar) { - ret = intel_gtt_map_memory(obj_priv->pages, - obj->size >> PAGE_SHIFT, - &obj_priv->sg_list, - &obj_priv->num_sg); + ret = intel_gtt_map_memory(obj->pages, + obj->base.size >> PAGE_SHIFT, + &obj->sg_list, + &obj->num_sg); if (ret != 0) return ret; - intel_gtt_insert_sg_entries(obj_priv->sg_list, obj_priv->num_sg, - obj_priv->gtt_space->start - >> PAGE_SHIFT, - obj_priv->agp_type); + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, + obj->agp_type); } else - intel_gtt_insert_pages(obj_priv->gtt_space->start >> PAGE_SHIFT, - obj->size >> PAGE_SHIFT, - obj_priv->pages, - obj_priv->agp_type); + intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, + obj->pages, + obj->agp_type); return 0; } -void i915_gem_gtt_unbind_object(struct drm_gem_object *obj) +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (dev_priv->mm.gtt->needs_dmar) { - intel_gtt_unmap_memory(obj_priv->sg_list, obj_priv->num_sg); - obj_priv->sg_list = NULL; - obj_priv->num_sg = 0; + intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); + obj->sg_list = NULL; + obj->num_sg = 0; } - intel_gtt_clear_range(obj_priv->gtt_space->start >> PAGE_SHIFT, - obj->size >> PAGE_SHIFT); + intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); } diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index a517b48d441d..1c5fdb30f272 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -234,25 +234,24 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) /* Is the current GTT allocation valid for the change in tiling? */ static bool -i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) +i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 size; if (tiling_mode == I915_TILING_NONE) return true; - if (INTEL_INFO(obj->dev)->gen >= 4) + if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (!obj_priv->gtt_space) + if (!obj->gtt_space) return true; - if (INTEL_INFO(obj->dev)->gen == 3) { - if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (obj->gtt_offset & ~I915_FENCE_START_MASK) return false; } else { - if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) + if (obj->gtt_offset & ~I830_FENCE_START_MASK) return false; } @@ -260,18 +259,18 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. 
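* (Worked example for gen3: a 1536KiB object starts from size = 1MiB and doubles once to 2MiB, so only a 2MiB gtt_space node at a 2MiB-aligned gtt_offset passes the checks below.)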
*/ - if (INTEL_INFO(obj->dev)->gen == 3) + if (INTEL_INFO(obj->base.dev)->gen == 3) size = 1024*1024; else size = 512*1024; - while (size < obj_priv->base.size) + while (size < obj->base.size) size <<= 1; - if (obj_priv->gtt_space->size != size) + if (obj->gtt_space->size != size) return false; - if (obj_priv->gtt_offset & (size - 1)) + if (obj->gtt_offset & (size - 1)) return false; return true; @@ -283,30 +282,29 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) */ int i915_gem_set_tiling(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_set_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_gem_check_is_wedged(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) return -ENOENT; - obj_priv = to_intel_bo(obj); - if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { - drm_gem_object_unreference_unlocked(obj); + if (!i915_tiling_ok(dev, + args->stride, obj->base.size, args->tiling_mode)) { + drm_gem_object_unreference_unlocked(&obj->base); return -EINVAL; } - if (obj_priv->pin_count) { - drm_gem_object_unreference_unlocked(obj); + if (obj->pin_count) { + drm_gem_object_unreference_unlocked(&obj->base); return -EBUSY; } @@ -340,8 +338,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } mutex_lock(&dev->struct_mutex); - if (args->tiling_mode != obj_priv->tiling_mode || - args->stride != obj_priv->stride) { + if (args->tiling_mode != obj->tiling_mode || + args->stride != obj->stride) { /* We need to rebind the object if its current allocation * no longer meets the alignment restrictions for its new * tiling mode. 
Otherwise we can just leave it alone, but @@ -349,22 +347,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, */ if (!i915_gem_object_fence_ok(obj, args->tiling_mode)) ret = i915_gem_object_unbind(obj); - else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + else if (obj->fence_reg != I915_FENCE_REG_NONE) ret = i915_gem_object_put_fence_reg(obj, true); else i915_gem_release_mmap(obj); if (ret != 0) { - args->tiling_mode = obj_priv->tiling_mode; - args->stride = obj_priv->stride; + args->tiling_mode = obj->tiling_mode; + args->stride = obj->stride; goto err; } - obj_priv->tiling_mode = args->tiling_mode; - obj_priv->stride = args->stride; + obj->tiling_mode = args->tiling_mode; + obj->stride = args->stride; } err: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); return ret; @@ -375,22 +373,20 @@ err: */ int i915_gem_get_tiling(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) return -ENOENT; - obj_priv = to_intel_bo(obj); mutex_lock(&dev->struct_mutex); - args->tiling_mode = obj_priv->tiling_mode; - switch (obj_priv->tiling_mode) { + args->tiling_mode = obj->tiling_mode; + switch (obj->tiling_mode) { case I915_TILING_X: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; break; @@ -410,7 +406,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); return 0; @@ -440,46 +436,44 @@ i915_gem_swizzle_page(struct page *page) } void -i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) +i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size >> PAGE_SHIFT; + int page_count = obj->base.size >> PAGE_SHIFT; int i; if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) return; - if (obj_priv->bit_17 == NULL) + if (obj->bit_17 == NULL) return; for (i = 0; i < page_count; i++) { - char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; + char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; if ((new_bit_17 & 0x1) != - (test_bit(i, obj_priv->bit_17) != 0)) { - i915_gem_swizzle_page(obj_priv->pages[i]); - set_page_dirty(obj_priv->pages[i]); + (test_bit(i, obj->bit_17) != 0)) { + i915_gem_swizzle_page(obj->pages[i]); + set_page_dirty(obj->pages[i]); } } } void -i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) +i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size >> PAGE_SHIFT; + int page_count = obj->base.size >> PAGE_SHIFT; int i; if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) return; - if (obj_priv->bit_17 == NULL) { - obj_priv->bit_17 
= kmalloc(BITS_TO_LONGS(page_count) * + if (obj->bit_17 == NULL) { + obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * sizeof(long), GFP_KERNEL); - if (obj_priv->bit_17 == NULL) { + if (obj->bit_17 == NULL) { DRM_ERROR("Failed to allocate memory for bit 17 " "record\n"); return; @@ -487,9 +481,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) } for (i = 0; i < page_count; i++) { - if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) - __set_bit(i, obj_priv->bit_17); + if (page_to_phys(obj->pages[i]) & (1 << 17)) + __set_bit(i, obj->bit_17); else - __clear_bit(i, obj_priv->bit_17); + __clear_bit(i, obj->bit_17); } } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a8f55f061f6d..09ac3bbd8165 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -423,28 +423,23 @@ static void i915_error_work_func(struct work_struct *work) #ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_device *dev, - struct drm_gem_object *src) + struct drm_i915_gem_object *src) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_error_object *dst; - struct drm_i915_gem_object *src_priv; int page, page_count; u32 reloc_offset; - if (src == NULL) + if (src == NULL || src->pages == NULL) return NULL; - src_priv = to_intel_bo(src); - if (src_priv->pages == NULL) - return NULL; - - page_count = src->size / PAGE_SIZE; + page_count = src->base.size / PAGE_SIZE; dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); if (dst == NULL) return NULL; - reloc_offset = src_priv->gtt_offset; + reloc_offset = src->gtt_offset; for (page = 0; page < page_count; page++) { unsigned long flags; void __iomem *s; @@ -466,7 +461,7 @@ i915_error_object_create(struct drm_device *dev, reloc_offset += PAGE_SIZE; } dst->page_count = page_count; - dst->gtt_offset = src_priv->gtt_offset; + dst->gtt_offset = src->gtt_offset; return dst; @@ -598,9 +593,9 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, static void i915_capture_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_i915_error_state *error; - struct drm_gem_object *batchbuffer[2]; + struct drm_i915_gem_object *batchbuffer[2]; unsigned long flags; u32 bbaddr; int count; @@ -668,34 +663,30 @@ static void i915_capture_error_state(struct drm_device *dev) batchbuffer[0] = NULL; batchbuffer[1] = NULL; count = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; count++; } /* Scan the other lists for completeness for those bizarre errors. 
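* (The hung batch is normally still on the active list scanned above; the flushing and inactive lists below are fallbacks, consulted only while a batchbuffer slot is still NULL.)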
*/ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; if (batchbuffer[0] && batchbuffer[1]) @@ -703,17 +694,15 @@ static void i915_capture_error_state(struct drm_device *dev) } } if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; if (batchbuffer[0] && batchbuffer[1]) @@ -732,14 +721,14 @@ static void i915_capture_error_state(struct drm_device *dev) /* Record the ringbuffer */ error->ringbuffer = i915_error_object_create(dev, - dev_priv->render_ring.gem_object); + dev_priv->render_ring.obj); /* Record buffers on the active and pinned lists. */ error->active_bo = NULL; error->pinned_bo = NULL; error->active_bo_count = count; - list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list) + list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) count++; error->pinned_bo_count = count - error->active_bo_count; @@ -948,7 +937,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct intel_unpin_work *work; unsigned long flags; bool stall_detected; @@ -967,13 +956,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) } /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ - obj_priv = to_intel_bo(work->pending_flip_obj); + obj = work->pending_flip_obj; if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; - stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; + stall_detected = I915_READ(dspsurf) == obj->gtt_offset; } else { int dspaddr = intel_crtc->plane == 0 ? 
DSPAADDR : DSPBADDR; - stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + + stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + crtc->y * crtc->fb->pitch + crtc->x * crtc->fb->bits_per_pixel/8); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 34ef49fd0377..1df7262ae077 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -6,6 +6,7 @@ #include #include +#include "i915_drv.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM i915 @@ -16,18 +17,18 @@ TRACE_EVENT(i915_gem_object_create, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, size) ), TP_fast_assign( __entry->obj = obj; - __entry->size = obj->size; + __entry->size = obj->base.size; ), TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) @@ -35,12 +36,12 @@ TRACE_EVENT(i915_gem_object_create, TRACE_EVENT(i915_gem_object_bind, - TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset, bool mappable), + TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), TP_ARGS(obj, gtt_offset, mappable), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, gtt_offset) __field(bool, mappable) ), @@ -58,20 +59,20 @@ TRACE_EVENT(i915_gem_object_bind, TRACE_EVENT(i915_gem_object_change_domain, - TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), + TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), TP_ARGS(obj, old_read_domains, old_write_domain), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, read_domains) __field(u32, write_domain) ), TP_fast_assign( __entry->obj = obj; - __entry->read_domains = obj->read_domains | (old_read_domains << 16); - __entry->write_domain = obj->write_domain | (old_write_domain << 16); + __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); + __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); ), TP_printk("obj=%p, read=%04x, write=%04x", @@ -81,12 +82,12 @@ TRACE_EVENT(i915_gem_object_change_domain, TRACE_EVENT(i915_gem_object_get_fence, - TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), + TP_PROTO(struct drm_i915_gem_object *obj, int fence, int tiling_mode), TP_ARGS(obj, fence, tiling_mode), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(int, fence) __field(int, tiling_mode) ), @@ -103,12 +104,12 @@ TRACE_EVENT(i915_gem_object_get_fence, DECLARE_EVENT_CLASS(i915_gem_object, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( @@ -120,21 +121,21 @@ DECLARE_EVENT_CLASS(i915_gem_object, DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); @@ -266,13 
+267,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, ); TRACE_EVENT(i915_flip_request, - TP_PROTO(int plane, struct drm_gem_object *obj), + TP_PROTO(int plane, struct drm_i915_gem_object *obj), TP_ARGS(plane, obj), TP_STRUCT__entry( __field(int, plane) - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( @@ -284,13 +285,13 @@ TRACE_EVENT(i915_flip_request, ); TRACE_EVENT(i915_flip_complete, - TP_PROTO(int plane, struct drm_gem_object *obj), + TP_PROTO(int plane, struct drm_i915_gem_object *obj), TP_ARGS(plane, obj), TP_STRUCT__entry( __field(int, plane) - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d4bc443f43fc..ae7d4f55ce07 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1066,13 +1066,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane, i; u32 fbc_ctl, fbc_ctl2; if (fb->pitch == dev_priv->cfb_pitch && - obj_priv->fence_reg == dev_priv->cfb_fence && + obj->fence_reg == dev_priv->cfb_fence && intel_crtc->plane == dev_priv->cfb_plane && I915_READ(FBC_CONTROL) & FBC_CTL_EN) return; @@ -1086,7 +1086,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* FBC_CTL wants 64B units */ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; @@ -1096,7 +1096,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* Set it up... */ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) fbc_ctl2 |= FBC_CTL_CPU_FENCE; I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_FENCE_OFF, crtc->y); @@ -1107,7 +1107,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) fbc_ctl |= dev_priv->cfb_fence; I915_WRITE(FBC_CONTROL, fbc_ctl); @@ -1150,7 +1150,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane == 0 ? 
DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; @@ -1159,7 +1159,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) dpfc_ctl = I915_READ(DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && - dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_fence == obj->fence_reg && dev_priv->cfb_plane == intel_crtc->plane && dev_priv->cfb_y == crtc->y) return; @@ -1170,12 +1170,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) } dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; dev_priv->cfb_y = crtc->y; dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); } else { @@ -1221,7 +1221,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; @@ -1230,9 +1230,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && - dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_fence == obj->fence_reg && dev_priv->cfb_plane == intel_crtc->plane && - dev_priv->cfb_offset == obj_priv->gtt_offset && + dev_priv->cfb_offset == obj->gtt_offset && dev_priv->cfb_y == crtc->y) return; @@ -1242,14 +1242,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) } dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; - dev_priv->cfb_offset = obj_priv->gtt_offset; + dev_priv->cfb_offset = obj->gtt_offset; dev_priv->cfb_y = crtc->y; dpfc_ctl &= DPFC_RESERVED; dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); } else { @@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); /* enable it... 
*/ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -1345,7 +1345,7 @@ static void intel_update_fbc(struct drm_device *dev) struct intel_crtc *intel_crtc; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; DRM_DEBUG_KMS("\n"); @@ -1384,9 +1384,9 @@ static void intel_update_fbc(struct drm_device *dev) intel_crtc = to_intel_crtc(crtc); fb = crtc->fb; intel_fb = to_intel_framebuffer(fb); - obj_priv = to_intel_bo(intel_fb->obj); + obj = intel_fb->obj; - if (intel_fb->obj->size > dev_priv->cfb_size) { + if (intel_fb->obj->base.size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " "compression\n"); dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; @@ -1410,7 +1410,7 @@ static void intel_update_fbc(struct drm_device *dev) dev_priv->no_fbc_reason = FBC_BAD_PLANE; goto out_disable; } - if (obj_priv->tiling_mode != I915_TILING_X) { + if (obj->tiling_mode != I915_TILING_X) { DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); dev_priv->no_fbc_reason = FBC_NOT_TILED; goto out_disable; @@ -1433,14 +1433,13 @@ out_disable: int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, bool pipelined) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 alignment; int ret; - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) alignment = 128 * 1024; @@ -1474,7 +1473,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * framebuffer compression. For simplicity, we always install * a fence as the cost is not that onerous. */ - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj, false); if (ret) goto err_unpin; @@ -1496,8 +1495,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; unsigned long Start, Offset; u32 dspcntr; @@ -1514,7 +1512,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - obj_priv = to_intel_bo(obj); reg = DSPCNTR(plane); dspcntr = I915_READ(reg); @@ -1539,7 +1536,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -EINVAL; } if (INTEL_INFO(dev)->gen >= 4) { - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; @@ -1551,7 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, I915_WRITE(reg, dspcntr); - Start = obj_priv->gtt_offset; + Start = obj->gtt_offset; Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", @@ -1605,18 +1602,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; wait_event(dev_priv->pending_flip_queue, - 
atomic_read(&obj_priv->pending_flip) == 0); + atomic_read(&obj->pending_flip) == 0); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. */ - ret = i915_gem_object_flush_gpu(obj_priv, false); + ret = i915_gem_object_flush_gpu(obj, false); if (ret) { i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); @@ -2010,16 +2006,16 @@ static void intel_clear_scanline_wait(struct drm_device *dev) static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_i915_private *dev_priv; if (crtc->fb == NULL) return; - obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); + obj = to_intel_framebuffer(crtc->fb)->obj; dev_priv = crtc->dev->dev_private; wait_event(dev_priv->pending_flip_queue, - atomic_read(&obj_priv->pending_flip) == 0); + atomic_read(&obj->pending_flip) == 0); } static void ironlake_crtc_enable(struct drm_crtc *crtc) @@ -4333,15 +4329,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, } static int intel_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, + struct drm_file *file, uint32_t handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_gem_object *bo; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; uint32_t addr; int ret; @@ -4351,7 +4346,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DRM_DEBUG_KMS("cursor off\n"); addr = 0; - bo = NULL; + obj = NULL; mutex_lock(&dev->struct_mutex); goto finish; } @@ -4362,13 +4357,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - bo = drm_gem_object_lookup(dev, file_priv, handle); - if (!bo) + obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); + if (!obj) return -ENOENT; - obj_priv = to_intel_bo(bo); - - if (bo->size < width * height * 4) { + if (obj->base.size < width * height * 4) { DRM_ERROR("buffer is to small\n"); ret = -ENOMEM; goto fail; @@ -4377,29 +4370,29 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { - ret = i915_gem_object_pin(bo, PAGE_SIZE, true); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; } - ret = i915_gem_object_set_to_gtt_domain(bo, 0); + ret = i915_gem_object_set_to_gtt_domain(obj, 0); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); goto fail_unpin; } - addr = obj_priv->gtt_offset; + addr = obj->gtt_offset; } else { int align = IS_I830(dev) ? 16 * 1024 : 256; - ret = i915_gem_attach_phys_object(dev, bo, + ret = i915_gem_attach_phys_object(dev, obj, (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, align); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; } - addr = obj_priv->phys_obj->handle->busaddr; + addr = obj->phys_obj->handle->busaddr; } if (IS_GEN2(dev)) @@ -4408,17 +4401,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, finish: if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { - if (intel_crtc->cursor_bo != bo) + if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else i915_gem_object_unpin(intel_crtc->cursor_bo); - drm_gem_object_unreference(intel_crtc->cursor_bo); + drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } mutex_unlock(&dev->struct_mutex); intel_crtc->cursor_addr = addr; - intel_crtc->cursor_bo = bo; + intel_crtc->cursor_bo = obj; intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; @@ -4426,11 +4419,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail_unpin: - i915_gem_object_unpin(bo); + i915_gem_object_unpin(obj); fail_locked: mutex_unlock(&dev->struct_mutex); fail: - drm_gem_object_unreference_unlocked(bo); + drm_gem_object_unreference_unlocked(&obj->base); return ret; } @@ -4890,7 +4883,7 @@ static void intel_idle_update(struct work_struct *work) * buffer), we'll also mark the display as busy, so we know to increase its * clock frequency. */ -void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) +void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = NULL; @@ -4971,8 +4964,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_lock(&work->dev->struct_mutex); i915_gem_object_unpin(work->old_fb_obj); - drm_gem_object_unreference(work->pending_flip_obj); - drm_gem_object_unreference(work->old_fb_obj); + drm_gem_object_unreference(&work->pending_flip_obj->base); + drm_gem_object_unreference(&work->old_fb_obj->base); mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -4983,7 +4976,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_pending_vblank_event *e; struct timeval now; unsigned long flags; @@ -5015,10 +5008,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = to_intel_bo(work->old_fb_obj); + obj = work->old_fb_obj; atomic_clear_mask(1 << intel_crtc->plane, - &obj_priv->pending_flip.counter); - if (atomic_read(&obj_priv->pending_flip) == 0) + &obj->pending_flip.counter); + if (atomic_read(&obj->pending_flip) == 0) wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); @@ -5065,8 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags, offset; @@ -5105,8 +5097,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_work; /* Reference the objects for the scheduled work. 
*/ - drm_gem_object_reference(work->old_fb_obj); - drm_gem_object_reference(obj); + drm_gem_object_reference(&work->old_fb_obj->base); + drm_gem_object_reference(&obj->base); crtc->fb = fb; @@ -5134,7 +5126,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, } work->pending_flip_obj = obj; - obj_priv = to_intel_bo(obj); work->enable_stall_check = true; @@ -5148,15 +5139,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* Block clients from rendering to the new back buffer until * the flip occurs and the object is no longer visible. */ - atomic_add(1 << intel_crtc->plane, - &to_intel_bo(work->old_fb_obj)->pending_flip); + atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); switch (INTEL_INFO(dev)->gen) { case 2: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset + offset); + OUT_RING(obj->gtt_offset + offset); OUT_RING(MI_NOOP); break; @@ -5164,7 +5154,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset + offset); + OUT_RING(obj->gtt_offset + offset); OUT_RING(MI_NOOP); break; @@ -5177,7 +5167,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); + OUT_RING(obj->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. @@ -5191,8 +5181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, case 6: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitch | obj_priv->tiling_mode); - OUT_RING(obj_priv->gtt_offset); + OUT_RING(fb->pitch | obj->tiling_mode); + OUT_RING(obj->gtt_offset); pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; pipesrc = I915_READ(pipe == 0 ? 
PIPEASRC : PIPEBSRC) & 0x0fff0fff; @@ -5208,8 +5198,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_objs: - drm_gem_object_unreference(work->old_fb_obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&work->old_fb_obj->base); + drm_gem_object_unreference(&obj->base); cleanup_work: mutex_unlock(&dev->struct_mutex); @@ -5295,7 +5285,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; @@ -5440,19 +5430,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(intel_fb->obj); + drm_gem_object_unreference_unlocked(&intel_fb->obj->base); kfree(intel_fb); } static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, + struct drm_file *file, unsigned int *handle) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_gem_object *object = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb->obj; - return drm_gem_handle_create(file_priv, object, handle); + return drm_gem_handle_create(file, &obj->base, handle); } static const struct drm_framebuffer_funcs intel_fb_funcs = { @@ -5463,12 +5453,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *intel_fb, struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; - if (obj_priv->tiling_mode == I915_TILING_Y) + if (obj->tiling_mode == I915_TILING_Y) return -EINVAL; if (mode_cmd->pitch & 63) @@ -5500,11 +5489,11 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, struct drm_mode_fb_cmd *mode_cmd) { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct intel_framebuffer *intel_fb; int ret; - obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); if (!obj) return ERR_PTR(-ENOENT); @@ -5512,10 +5501,9 @@ intel_user_framebuffer_create(struct drm_device *dev, if (!intel_fb) return ERR_PTR(-ENOMEM); - ret = intel_framebuffer_init(dev, intel_fb, - mode_cmd, obj); + ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference_unlocked(&obj->base); kfree(intel_fb); return ERR_PTR(ret); } @@ -5528,10 +5516,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .output_poll_changed = intel_fb_output_poll_changed, }; -static struct drm_gem_object * +static struct drm_i915_gem_object * intel_alloc_context_page(struct drm_device *dev) { - struct drm_gem_object *ctx; + struct drm_i915_gem_object *ctx; int ret; ctx = i915_gem_alloc_object(dev, 4096); @@ -5559,7 +5547,7 @@ intel_alloc_context_page(struct drm_device *dev) err_unpin: i915_gem_object_unpin(ctx); err_unref: - drm_gem_object_unreference(ctx); + drm_gem_object_unreference(&ctx->base); mutex_unlock(&dev->struct_mutex); return NULL; } @@ -5886,20 +5874,17 @@ void intel_init_clock_gating(struct drm_device *dev) if (dev_priv->renderctx == NULL) dev_priv->renderctx = 
intel_alloc_context_page(dev); if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj_priv; - obj_priv = to_intel_bo(dev_priv->renderctx); - if (obj_priv) { - if (BEGIN_LP_RING(4) == 0) { - OUT_RING(MI_SET_CONTEXT); - OUT_RING(obj_priv->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); - ADVANCE_LP_RING(); - } + struct drm_i915_gem_object *obj = dev_priv->renderctx; + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_SET_CONTEXT); + OUT_RING(obj->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + OUT_RING(MI_NOOP); + OUT_RING(MI_FLUSH); + ADVANCE_LP_RING(); } } else DRM_DEBUG_KMS("Failed to allocate render context." @@ -5907,22 +5892,11 @@ void intel_init_clock_gating(struct drm_device *dev) } if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { - struct drm_i915_gem_object *obj_priv = NULL; - + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); if (dev_priv->pwrctx) { - obj_priv = to_intel_bo(dev_priv->pwrctx); - } else { - struct drm_gem_object *pwrctx; - - pwrctx = intel_alloc_context_page(dev); - if (pwrctx) { - dev_priv->pwrctx = pwrctx; - obj_priv = to_intel_bo(pwrctx); - } - } - - if (obj_priv) { - I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); + struct drm_i915_gem_object *obj = dev_priv->pwrctx; + I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN); I915_WRITE(MCHBAR_RENDER_STANDBY, I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); } @@ -6197,23 +6171,25 @@ void intel_modeset_cleanup(struct drm_device *dev) dev_priv->display.disable_fbc(dev); if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj = dev_priv->renderctx; + + I915_WRITE(CCID, obj->gtt_offset &~ CCID_EN); + POSTING_READ(CCID); - obj_priv = to_intel_bo(dev_priv->renderctx); - I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); - I915_READ(CCID); - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(dev_priv->renderctx); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->renderctx = NULL; } if (dev_priv->pwrctx) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj = dev_priv->pwrctx; + + I915_WRITE(PWRCTXA, obj->gtt_offset &~ PWRCTX_EN); + POSTING_READ(PWRCTXA); - obj_priv = to_intel_bo(dev_priv->pwrctx); - I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); - I915_READ(PWRCTXA); - i915_gem_object_unpin(dev_priv->pwrctx); - drm_gem_object_unreference(dev_priv->pwrctx); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->pwrctx = NULL; } if (IS_IRONLAKE_M(dev)) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 21551fe74541..5a4f14e36d6c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) struct intel_framebuffer { struct drm_framebuffer base; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; }; struct intel_fbdev { @@ -166,7 +166,7 @@ struct intel_crtc { struct intel_unpin_work *unpin_work; int fdi_lanes; - struct drm_gem_object *cursor_bo; + struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; @@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) struct intel_unpin_work { struct 
work_struct work; struct drm_device *dev; - struct drm_gem_object *old_fb_obj; - struct drm_gem_object *pending_flip_obj; + struct drm_i915_gem_object *old_fb_obj; + struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; int pending; bool enable_stall_check; @@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); -extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); +extern void intel_mark_busy(struct drm_device *dev, + struct drm_i915_gem_object *obj); extern void intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int dp_reg); void @@ -299,13 +300,13 @@ extern void ironlake_disable_drps(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, bool pipelined); extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj); + struct drm_i915_gem_object *obj); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index af2a1dddc28e..c2cffeb4fe89 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd mode_cmd; - struct drm_gem_object *fbo = NULL; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct device *device = &dev->pdev->dev; int size, ret, mmio_bar = IS_GEN2(dev) ? 
1 : 0; @@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev, size = mode_cmd.pitch * mode_cmd.height; size = ALIGN(size, PAGE_SIZE); - fbo = i915_gem_alloc_object(dev, size); - if (!fbo) { + obj = i915_gem_alloc_object(dev, size); + if (!obj) { DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } - obj_priv = to_intel_bo(fbo); mutex_lock(&dev->struct_mutex); /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(dev, fbo, false); + ret = intel_pin_and_fence_fb_obj(dev, obj, false); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; @@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, info->par = ifbdev; - ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); + ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); if (ret) goto out_unpin; @@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev, else info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); - info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; + info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; info->fix.smem_len = size; - info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, - size); + info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); if (!info->screen_base) { ret = -ENOSPC; goto out_unpin; @@ -168,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", fb->width, fb->height, - obj_priv->gtt_offset, fbo); + obj->gtt_offset, obj); mutex_unlock(&dev->struct_mutex); @@ -176,9 +173,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, return 0; out_unpin: - i915_gem_object_unpin(fbo); + i915_gem_object_unpin(obj); out_unref: - drm_gem_object_unreference(fbo); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); out: return ret; @@ -225,7 +222,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) { - drm_gem_object_unreference_unlocked(ifb->obj); + drm_gem_object_unreference_unlocked(&ifb->obj->base); ifb->obj = NULL; } } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index ec8ffaccbbdb..af715cc03ee0 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -376,24 +376,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay, static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) { - struct drm_gem_object *obj = &overlay->old_vid_bo->base; + struct drm_i915_gem_object *obj = overlay->old_vid_bo; i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); overlay->old_vid_bo = NULL; } static void intel_overlay_off_tail(struct intel_overlay *overlay) { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj = overlay->vid_bo; /* never have the overlay hw on without showing a frame */ BUG_ON(!overlay->vid_bo); - obj = &overlay->vid_bo->base; i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); overlay->vid_bo = NULL; overlay->crtc->overlay = NULL; @@ -764,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params) } static int intel_overlay_do_put_image(struct intel_overlay *overlay, - struct drm_gem_object *new_bo, + struct drm_i915_gem_object *new_bo, struct put_image_params *params) { int ret, tmp_width; 
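/*
 * A sketch of the pattern the conversions in this series assume
 * (layout and helper name as used elsewhere in the driver):
 * struct drm_i915_gem_object embeds its GEM object as 'base', so the
 * two types are interconvertible.  Callers keep the driver type and
 * step down to &obj->base only at the GEM-core boundary, which is why
 * lookups become to_intel_bo(drm_gem_object_lookup(...)) and
 * unreferences take &obj->base.
 */
struct drm_i915_gem_object {
	struct drm_gem_object base;
	/* driver-private state follows: tiling_mode, gtt_offset, ... */
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	return container_of(gem, struct drm_i915_gem_object, base);
}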
struct overlay_registers *regs; bool scale_changed = false; - struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); struct drm_device *dev = overlay->dev; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -825,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->SWIDTHSW = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); regs->SHEIGHT = params->src_h; - regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; + regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y; regs->OSTRIDE = params->stride_Y; if (params->format & I915_OVERLAY_YUV_PLANAR) { @@ -839,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, params->src_w/uv_hscale); regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; - regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; - regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; + regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; + regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; regs->OSTRIDE |= params->stride_UV << 16; } @@ -857,7 +855,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; overlay->old_vid_bo = overlay->vid_bo; - overlay->vid_bo = to_intel_bo(new_bo); + overlay->vid_bo = new_bo; return 0; @@ -970,7 +968,7 @@ static int check_overlay_scaling(struct put_image_params *rec) static int check_overlay_src(struct drm_device *dev, struct drm_intel_overlay_put_image *rec, - struct drm_gem_object *new_bo) + struct drm_i915_gem_object *new_bo) { int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); @@ -1055,7 +1053,7 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; tmp = rec->stride_Y*rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) + if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; break; @@ -1066,12 +1064,12 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; tmp = rec->stride_Y * rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) + if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; tmp = rec->stride_UV * (rec->src_height / uv_vscale); - if (rec->offset_U + tmp > new_bo->size || - rec->offset_V + tmp > new_bo->size) + if (rec->offset_U + tmp > new_bo->base.size || + rec->offset_V + tmp > new_bo->base.size) return -EINVAL; break; } @@ -1114,7 +1112,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, struct intel_overlay *overlay; struct drm_mode_object *drmmode_obj; struct intel_crtc *crtc; - struct drm_gem_object *new_bo; + struct drm_i915_gem_object *new_bo; struct put_image_params *params; int ret; @@ -1153,8 +1151,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, } crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); - new_bo = drm_gem_object_lookup(dev, file_priv, - put_image_rec->bo_handle); + new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, + put_image_rec->bo_handle)); if (!new_bo) { ret = -ENOENT; goto out_free; @@ -1245,7 +1243,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); - drm_gem_object_unreference_unlocked(new_bo); + drm_gem_object_unreference_unlocked(&new_bo->base); out_free: kfree(params); @@ -1398,7 +1396,7 @@ void intel_setup_overlay(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_overlay *overlay; - struct drm_gem_object *reg_bo; + struct drm_i915_gem_object *reg_bo; struct 
overlay_registers *regs; int ret; @@ -1413,7 +1411,7 @@ void intel_setup_overlay(struct drm_device *dev) reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); if (!reg_bo) goto out_free; - overlay->reg_bo = to_intel_bo(reg_bo); + overlay->reg_bo = reg_bo; if (OVERLAY_NEEDS_PHYSICAL(dev)) { ret = i915_gem_attach_phys_object(dev, reg_bo, @@ -1423,14 +1421,14 @@ void intel_setup_overlay(struct drm_device *dev) DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; } - overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; + overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; } - overlay->flip_addr = overlay->reg_bo->gtt_offset; + overlay->flip_addr = reg_bo->gtt_offset; ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { @@ -1462,7 +1460,7 @@ void intel_setup_overlay(struct drm_device *dev) out_unpin_bo: i915_gem_object_unpin(reg_bo); out_free_bo: - drm_gem_object_unreference(reg_bo); + drm_gem_object_unreference(&reg_bo->base); out_free: kfree(overlay); return; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1db860d7989a..181aad31125d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -139,7 +139,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) static int init_ring_common(struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = ring->dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object); + struct drm_i915_gem_object *obj = ring->obj; u32 head; /* Stop the ring if it's running. */ @@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) ring->write_tail(ring, 0); /* Initialize the ring.
*/ - I915_WRITE_START(ring, obj_priv->gtt_offset); + I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) /* If the head is still not zero, the ring is dead */ if ((I915_READ_CTL(ring) & RING_VALID) == 0 || - I915_READ_START(ring) != obj_priv->gtt_offset || + I915_READ_START(ring) != obj->gtt_offset || (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", @@ -514,17 +514,15 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, static void cleanup_status_page(struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = ring->dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; obj = ring->status_page.obj; if (obj == NULL) return; - obj_priv = to_intel_bo(obj); - kunmap(obj_priv->pages[0]); + kunmap(obj->pages[0]); i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); @@ -534,8 +532,7 @@ static int init_status_page(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; obj = i915_gem_alloc_object(dev, 4096); @@ -544,16 +541,15 @@ static int init_status_page(struct intel_ring_buffer *ring) ret = -ENOMEM; goto err; } - obj_priv = to_intel_bo(obj); - obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + obj->agp_type = AGP_USER_CACHED_MEMORY; ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { goto err_unref; } - ring->status_page.gfx_addr = obj_priv->gtt_offset; - ring->status_page.page_addr = kmap(obj_priv->pages[0]); + ring->status_page.gfx_addr = obj->gtt_offset; + ring->status_page.page_addr = kmap(obj->pages[0]); if (ring->status_page.page_addr == NULL) { memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); goto err_unpin; @@ -570,7 +566,7 @@ static int init_status_page(struct intel_ring_buffer *ring) err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); err: return ret; } @@ -578,8 +574,7 @@ err: int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret; ring->dev = dev; @@ -600,15 +595,14 @@ int intel_init_ring_buffer(struct drm_device *dev, goto err_hws; } - ring->gem_object = obj; + ring->obj = obj; ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) goto err_unref; - obj_priv = to_intel_bo(obj); ring->map.size = ring->size; - ring->map.offset = dev->agp->base + obj_priv->gtt_offset; + ring->map.offset = dev->agp->base + obj->gtt_offset; ring->map.type = 0; ring->map.flags = 0; ring->map.mtrr = 0; @@ -632,8 +626,8 @@ err_unmap: err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); - ring->gem_object = NULL; + drm_gem_object_unreference(&obj->base); + ring->obj = NULL; err_hws: cleanup_status_page(ring); return ret; @@ -644,7 +638,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) struct drm_i915_private *dev_priv; int ret; - if (ring->gem_object == NULL) + if (ring->obj == NULL) return; 
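/*
 * A minimal sketch of the release idiom these hunks converge on: undo
 * the pin first, then hand the embedded GEM base back to the core for
 * the final unreference.  (The wrapper itself is hypothetical; only
 * the two calls come from the patch.)
 */
static void release_pinned_bo(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
}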
/* Disable the ring buffer. The ring must be idle at this point */ @@ -654,9 +648,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) drm_core_ioremapfree(&ring->map, ring->dev); - i915_gem_object_unpin(ring->gem_object); - drm_gem_object_unreference(ring->gem_object); - ring->gem_object = NULL; + i915_gem_object_unpin(ring->obj); + drm_gem_object_unreference(&ring->obj->base); + ring->obj = NULL; if (ring->cleanup) ring->cleanup(ring); @@ -902,11 +896,11 @@ static int blt_ring_init(struct intel_ring_buffer *ring) u32 *ptr; int ret; - obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096)); + obj = i915_gem_alloc_object(ring->dev, 4096); if (obj == NULL) return -ENOMEM; - ret = i915_gem_object_pin(&obj->base, 4096, true); + ret = i915_gem_object_pin(obj, 4096, true); if (ret) { drm_gem_object_unreference(&obj->base); return ret; @@ -917,9 +911,9 @@ static int blt_ring_init(struct intel_ring_buffer *ring) *ptr++ = MI_NOOP; kunmap(obj->pages[0]); - ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); + ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) { - i915_gem_object_unpin(&obj->base); + i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); return ret; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2565d65a625b..1747e329ee94 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -4,7 +4,7 @@ struct intel_hw_status_page { u32 __iomem *page_addr; unsigned int gfx_addr; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; }; #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) @@ -32,7 +32,7 @@ struct intel_ring_buffer { u32 mmio_base; void *virtual_start; struct drm_device *dev; - struct drm_gem_object *gem_object; + struct drm_i915_gem_object *obj; unsigned int head; unsigned int tail; -- cgit v1.2.3 From 748ebc6017a943ec065e653e975a5e8dace77ac6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 24 Oct 2010 10:28:47 +0100 Subject: drm/i915: Record fence registers on error. Having seen the effects of erroneous fencing on the batchbuffer, a useful sanity check is to record the fence registers at the time of an error. 
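The readout has to follow each generation's register file: gen6 and gen4/5 expose sixteen 64-bit fence registers, while gen2/3 use 32-bit registers, with i945/G33 supplying a second bank of eight. As a condensed sketch of the gen4+ case (register names as used by the driver):

	u64 fence[16];
	int i;

	for (i = 0; i < 16; i++)
		fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));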
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 34 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1e8cd74d18d5..addb9392e1c4 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -684,6 +684,9 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); seq_printf(m, " seqno: 0x%08x\n", error->seqno); + for (i = 0; i < 16; i++) + seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); + if (error->active_bo) print_error_buffers(m, "Active", error->active_bo, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 22d6388b331f..699e71a0dab7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -166,6 +166,7 @@ struct drm_i915_error_state { u32 instdone1; u32 seqno; u64 bbaddr; + u64 fence[16]; struct timeval time; struct drm_i915_error_object { int page_count; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 09ac3bbd8165..de95c7bb6dba 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -581,6 +581,35 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, return i; } +static void i915_gem_record_fences(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + + } +} + /** * i915_capture_error_state - capture an error record for later analysis * @dev: drm device @@ -656,6 +685,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->acthd = I915_READ(ACTHD); error->bbaddr = 0; } + i915_gem_record_fences(dev, error); bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring); -- cgit v1.2.3 From ab5793ad3ae11a5cbe2194b449e5fdd80b19f14f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 22 Nov 2010 13:24:13 +0000 Subject: drm/i915: Tweak on-error bbaddr parsing for clarity Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index de95c7bb6dba..9aa1e1dc5fd5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -525,26 +525,23 @@ i915_ringbuffer_last_batch(struct drm_device *dev, /* Locate the current position in the ringbuffer and walk back * to find the most recently dispatched batch buffer. 
*/ - bbaddr = 0; head = I915_READ_HEAD(ring) & HEAD_ADDR; - val = (u32 *)(ring->virtual_start + head); + val = (u32 *)(ring->virtual_start + head); while (--val >= (u32 *)ring->virtual_start) { bbaddr = i915_get_bbaddr(dev, val); if (bbaddr) - break; + return bbaddr; } - if (bbaddr == 0) { - val = (u32 *)(ring->virtual_start + ring->size); - while (--val >= (u32 *)ring->virtual_start) { - bbaddr = i915_get_bbaddr(dev, val); - if (bbaddr) - break; - } + val = (u32 *)(ring->virtual_start + ring->size); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); + if (bbaddr) + return bbaddr; } - return bbaddr; + return 0; } static u32 capture_bo_list(struct drm_i915_error_buffer *err, -- cgit v1.2.3 From 1ec14ad3132702694f2e1a90b30641cf111183b9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 4 Dec 2010 11:30:53 +0000 Subject: drm/i915: Implement GPU semaphores for inter-ring synchronisation on SNB The bulk of the change is to convert the growing list of rings into an array so that the relationship between the rings and the semaphore sync registers can be easily computed. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 75 +++--- drivers/gpu/drm/i915/i915_dma.c | 60 +++-- drivers/gpu/drm/i915/i915_drv.c | 6 +- drivers/gpu/drm/i915/i915_drv.h | 48 ++-- drivers/gpu/drm/i915/i915_gem.c | 86 +++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 94 +++++-- drivers/gpu/drm/i915/i915_irq.c | 209 ++++++++------- drivers/gpu/drm/i915/i915_reg.h | 19 +- drivers/gpu/drm/i915/intel_display.c | 4 +- drivers/gpu/drm/i915/intel_opregion.c | 8 +- drivers/gpu/drm/i915/intel_overlay.c | 8 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 415 ++++++++++++++++++----------- drivers/gpu/drm/i915/intel_ringbuffer.h | 41 ++- drivers/gpu/drm/i915/intel_tv.c | 14 +- 14 files changed, 648 insertions(+), 439 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3c9d4b876865..aedb02157474 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -339,10 +339,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data) return ret; count = 0; - if (!list_empty(&dev_priv->render_ring.request_list)) { + if (!list_empty(&dev_priv->ring[RCS].request_list)) { seq_printf(m, "Render requests:\n"); list_for_each_entry(gem_request, - &dev_priv->render_ring.request_list, + &dev_priv->ring[RCS].request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -350,10 +350,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data) } count++; } - if (!list_empty(&dev_priv->bsd_ring.request_list)) { + if (!list_empty(&dev_priv->ring[VCS].request_list)) { seq_printf(m, "BSD requests:\n"); list_for_each_entry(gem_request, - &dev_priv->bsd_ring.request_list, + &dev_priv->ring[VCS].request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -361,10 +361,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data) } count++; } - if (!list_empty(&dev_priv->blt_ring.request_list)) { + if (!list_empty(&dev_priv->ring[BCS].request_list)) { seq_printf(m, "BLT requests:\n"); list_for_each_entry(gem_request, - &dev_priv->blt_ring.request_list, + &dev_priv->ring[BCS].request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -398,15 +398,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = 
node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - i915_ring_seqno_info(m, &dev_priv->render_ring); - i915_ring_seqno_info(m, &dev_priv->bsd_ring); - i915_ring_seqno_info(m, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_ring_seqno_info(m, &dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); @@ -419,7 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -458,9 +457,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - i915_ring_seqno_info(m, &dev_priv->render_ring); - i915_ring_seqno_info(m, &dev_priv->bsd_ring); - i915_ring_seqno_info(m, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_ring_seqno_info(m, &dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); return 0; @@ -503,13 +501,7 @@ static int i915_hws_info(struct seq_file *m, void *data) volatile u32 *hws; int i; - switch ((uintptr_t)node->info_ent->data) { - case RING_RENDER: ring = &dev_priv->render_ring; break; - case RING_BSD: ring = &dev_priv->bsd_ring; break; - case RING_BLT: ring = &dev_priv->blt_ring; break; - default: return -EINVAL; - } - + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; hws = (volatile u32 *)ring->status_page.page_addr; if (hws == NULL) return 0; @@ -569,17 +561,11 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) struct intel_ring_buffer *ring; int ret; - switch ((uintptr_t)node->info_ent->data) { - case RING_RENDER: ring = &dev_priv->render_ring; break; - case RING_BSD: ring = &dev_priv->bsd_ring; break; - case RING_BLT: ring = &dev_priv->blt_ring; break; - default: return -EINVAL; - } - ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; if (!ring->obj) { seq_printf(m, "No ringbuffer setup\n"); } else { @@ -603,21 +589,20 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; - switch ((uintptr_t)node->info_ent->data) { - case RING_RENDER: ring = &dev_priv->render_ring; break; - case RING_BSD: ring = &dev_priv->bsd_ring; break; - case RING_BLT: ring = &dev_priv->blt_ring; break; - default: return -EINVAL; - } - + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; if (ring->size == 0) - return 0; + return 0; seq_printf(m, "Ring %s:\n", ring->name); seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); seq_printf(m, " Size : %08x\n", ring->size); seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); + seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); + if (IS_GEN6(dev)) { + seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); + seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); + } seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); @@ -1177,15 +1162,15 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 
{"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_hws", i915_hws_info, 0, (void *)RING_RENDER}, - {"i915_gem_hws_blt", i915_hws_info, 0, (void *)RING_BLT}, - {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)RING_BSD}, - {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_RENDER}, - {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_RENDER}, - {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BSD}, - {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BSD}, - {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BLT}, - {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BLT}, + {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, + {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, + {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, + {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, + {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, + {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, + {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, + {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, + {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, {"i915_error_state", i915_error_state, 0}, {"i915_rstdby_delays", i915_rstdby_delays, 0}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9a22da9b2083..664300986fb4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -49,6 +49,8 @@ static int i915_init_phys_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + /* Program Hardware Status Page */ dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); @@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev) DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - dev_priv->render_ring.status_page.page_addr - = dev_priv->status_page_dmah->vaddr; + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); + memset(ring->status_page.page_addr, 0, PAGE_SIZE); if (INTEL_INFO(dev)->gen >= 4) dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & @@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev) static void i915_free_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + if (dev_priv->status_page_dmah) { drm_pci_free(dev, dev_priv->status_page_dmah); dev_priv->status_page_dmah = NULL; } - if (dev_priv->render_ring.status_page.gfx_addr) { - dev_priv->render_ring.status_page.gfx_addr = 0; + if (ring->status_page.gfx_addr) { + ring->status_page.gfx_addr = 0; drm_core_ioremapfree(&dev_priv->hws_map, dev); } @@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; - struct intel_ring_buffer *ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); /* * We should never lose context on the ring with modesetting @@ -123,6 +126,8 @@ void i915_kernel_lost_context(struct drm_device * dev) static int i915_dma_cleanup(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; + 
/* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. @@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev) drm_irq_uninstall(dev); mutex_lock(&dev->struct_mutex); - intel_cleanup_ring_buffer(&dev_priv->render_ring); - intel_cleanup_ring_buffer(&dev_priv->bsd_ring); - intel_cleanup_ring_buffer(&dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + intel_cleanup_ring_buffer(&dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ @@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + struct intel_ring_buffer *ring = LP_RING(dev_priv); master_priv->sarea = drm_getsarea(dev); if (master_priv->sarea) { @@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } if (init->ring_size != 0) { - if (dev_priv->render_ring.obj != NULL) { + if (ring->obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } - dev_priv->render_ring.size = init->ring_size; + ring->size = init->ring_size; - dev_priv->render_ring.map.offset = init->ring_start; - dev_priv->render_ring.map.size = init->ring_size; - dev_priv->render_ring.map.type = 0; - dev_priv->render_ring.map.flags = 0; - dev_priv->render_ring.map.mtrr = 0; + ring->map.offset = init->ring_start; + ring->map.size = init->ring_size; + ring->map.type = 0; + ring->map.flags = 0; + ring->map.mtrr = 0; - drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); + drm_core_ioremap_wc(&ring->map, dev); - if (dev_priv->render_ring.map.handle == NULL) { + if (ring->map.handle == NULL) { i915_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } } - dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; + ring->virtual_start = ring->map.handle; dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; @@ -201,12 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); - struct intel_ring_buffer *ring; DRM_DEBUG_DRIVER("%s\n", __func__); - ring = &dev_priv->render_ring; - if (ring->map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -326,7 +329,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) drm_i915_private_t *dev_priv = dev->dev_private; int i, ret; - if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) + if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) return -EINVAL; for (i = 0; i < dwords;) { @@ -565,13 +568,12 @@ static int i915_dispatch_flip(struct drm_device * dev) return 0; } -static int i915_quiescent(struct drm_device * dev) +static int i915_quiescent(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev->dev_private); i915_kernel_lost_context(dev); - return intel_wait_ring_buffer(&dev_priv->render_ring, - dev_priv->render_ring.size - 8); + return intel_wait_ring_buffer(ring, 
ring->size - 8); } static int i915_flush_ioctl(struct drm_device *dev, void *data, @@ -828,7 +830,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; - struct intel_ring_buffer *ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); if (!I915_NEED_GFX_HWS(dev)) return -EINVAL; @@ -1978,7 +1980,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!IS_I945G(dev) && !IS_I945GM(dev)) pci_enable_msi(dev->pdev); - spin_lock_init(&dev_priv->user_irq_lock); + spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); dev_priv->trace_irq_seqno = 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 64844e2e9f86..413a040386a9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -487,11 +487,11 @@ int i915_reset(struct drm_device *dev, u8 flags) !dev_priv->mm.suspended) { dev_priv->mm.suspended = 0; - dev_priv->render_ring.init(&dev_priv->render_ring); + dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); if (HAS_BSD(dev)) - dev_priv->bsd_ring.init(&dev_priv->bsd_ring); + dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); if (HAS_BLT(dev)) - dev_priv->blt_ring.init(&dev_priv->blt_ring); + dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index af9ff40b135b..8b19b5806230 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -269,9 +269,7 @@ typedef struct drm_i915_private { } *gmbus; struct pci_dev *bridge_dev; - struct intel_ring_buffer render_ring; - struct intel_ring_buffer bsd_ring; - struct intel_ring_buffer blt_ring; + struct intel_ring_buffer ring[I915_NUM_RINGS]; uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; @@ -290,19 +288,15 @@ typedef struct drm_i915_private { int page_flipping; atomic_t irq_received; - /** Protects user_irq_refcount and irq_mask_reg */ - spinlock_t user_irq_lock; u32 trace_irq_seqno; + + /* protects the irq masks */ + spinlock_t irq_lock; /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 irq_mask_reg; u32 pipestat[2]; - /** splitted irq regs for graphics and display engine on Ironlake, - irq_mask_reg is still used for display irq. */ - u32 gt_irq_mask_reg; - u32 gt_irq_enable_reg; - u32 de_irq_enable_reg; - u32 pch_irq_mask_reg; - u32 pch_irq_enable_reg; + u32 irq_mask; + u32 gt_irq_mask; + u32 pch_irq_mask; u32 hotplug_supported_mask; struct work_struct hotplug_work; @@ -1104,7 +1098,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible); void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring, + u32 seqno); /** * Returns true if seq1 is later than seq2. @@ -1272,6 +1267,17 @@ extern void intel_display_print_error_state(struct seq_file *m, struct intel_display_error_state *error); #endif +#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) + +#define BEGIN_LP_RING(n) \ + intel_ring_begin(LP_RING(dev_priv), (n)) + +#define OUT_RING(x) \ + intel_ring_emit(LP_RING(dev_priv), x) + +#define ADVANCE_LP_RING() \ + intel_ring_advance(LP_RING(dev_priv)) + /** * Lock test for when it's just for synchronization of ring access. 
* @@ -1279,8 +1285,7 @@ extern void intel_display_print_error_state(struct seq_file *m, * has access to the ring. */ #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ - if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \ - == NULL) \ + if (LP_RING(dev->dev_private)->obj == NULL) \ LOCK_TEST_WITH_RETURN(dev, file); \ } while (0) @@ -1366,15 +1371,6 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) } } -#define BEGIN_LP_RING(n) \ - intel_ring_begin(&dev_priv->render_ring, (n)) - -#define OUT_RING(x) \ - intel_ring_emit(&dev_priv->render_ring, x) - -#define ADVANCE_LP_RING() \ - intel_ring_advance(&dev_priv->render_ring) - /** * Reads a dword out of the status page, which is written to from the command * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or @@ -1391,7 +1387,7 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) * The area from dword 0x20 to 0x3ff is available for driver usage. */ #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ - (dev_priv->render_ring.status_page.page_addr))[reg]) + (LP_RING(dev_priv)->status_page.page_addr))[reg]) #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) #define I915_GEM_HWS_INDEX 0x20 #define I915_BREADCRUMB_INDEX 0x21 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d99212fe54ed..eeed2e99d247 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1561,11 +1561,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring, + u32 seqno) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t seqno = i915_gem_next_request_seqno(dev, ring); BUG_ON(ring == NULL); obj->ring = ring; @@ -1679,7 +1679,8 @@ i915_gem_process_flushing_list(struct drm_device *dev, obj->base.write_domain = 0; list_del_init(&obj->gpu_write_list); - i915_gem_object_move_to_active(obj, ring); + i915_gem_object_move_to_active(obj, ring, + i915_gem_next_request_seqno(dev, ring)); trace_i915_gem_object_change_domain(obj, obj->base.read_domains, @@ -1804,10 +1805,10 @@ void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; + int i; - i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); - i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); - i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); /* Remove anything from the flushing lists. 
The GPU cache is likely * to be lost on reset along with the data, so simply move the @@ -1846,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; + int i; if (!ring->status_page.page_addr || list_empty(&ring->request_list)) @@ -1854,6 +1856,11 @@ i915_gem_retire_requests_ring(struct drm_device *dev, WARN_ON(i915_verify_lists(dev)); seqno = ring->get_seqno(ring); + + for (i = 0; i < I915_NUM_RINGS; i++) + if (seqno >= ring->sync_seqno[i]) + ring->sync_seqno[i] = 0; + while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -1892,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - ring->user_irq_put(ring); + ring->irq_put(ring); dev_priv->trace_irq_seqno = 0; } @@ -1903,6 +1910,7 @@ void i915_gem_retire_requests(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; if (!list_empty(&dev_priv->mm.deferred_free_list)) { struct drm_i915_gem_object *obj, *next; @@ -1918,9 +1926,8 @@ i915_gem_retire_requests(struct drm_device *dev) i915_gem_free_object_tail(obj); } - i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); - i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); - i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]); } static void @@ -1942,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work) i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && - (!list_empty(&dev_priv->render_ring.request_list) || - !list_empty(&dev_priv->bsd_ring.request_list) || - !list_empty(&dev_priv->blt_ring.request_list))) + (!list_empty(&dev_priv->ring[RCS].request_list) || + !list_empty(&dev_priv->ring[VCS].request_list) || + !list_empty(&dev_priv->ring[BCS].request_list))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -1993,7 +2000,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); ring->waiting_seqno = seqno; - ring->user_irq_get(ring); + ring->irq_get(ring); if (interruptible) ret = wait_event_interruptible(ring->irq_queue, i915_seqno_passed(ring->get_seqno(ring), seqno) @@ -2003,7 +2010,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(ring); + ring->irq_put(ring); ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); @@ -2159,7 +2166,7 @@ i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; - int ret; + int ret, i; lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->mm.active_list)); @@ -2167,17 +2174,11 @@ i915_gpu_idle(struct drm_device *dev) return 0; /* Flush everything onto the inactive list. 
*/ - ret = i915_ring_idle(dev, &dev_priv->render_ring); - if (ret) - return ret; - - ret = i915_ring_idle(dev, &dev_priv->bsd_ring); - if (ret) - return ret; - - ret = i915_ring_idle(dev, &dev_priv->blt_ring); - if (ret) - return ret; + for (i = 0; i < I915_NUM_RINGS; i++) { + ret = i915_ring_idle(dev, &dev_priv->ring[i]); + if (ret) + return ret; + } return 0; } @@ -3153,11 +3154,11 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) * generation is designed to be run atomically and so is * lockless. */ - ring->user_irq_get(ring); + ring->irq_get(ring); ret = wait_event_interruptible(ring->irq_queue, i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(ring); + ring->irq_put(ring); if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) ret = -EIO; @@ -3584,9 +3585,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev) return 0; cleanup_bsd_ring: - intel_cleanup_ring_buffer(&dev_priv->bsd_ring); + intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); cleanup_render_ring: - intel_cleanup_ring_buffer(&dev_priv->render_ring); + intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); return ret; } @@ -3594,10 +3595,10 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; - intel_cleanup_ring_buffer(&dev_priv->render_ring); - intel_cleanup_ring_buffer(&dev_priv->bsd_ring); - intel_cleanup_ring_buffer(&dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + intel_cleanup_ring_buffer(&dev_priv->ring[i]); } int @@ -3605,7 +3606,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; @@ -3625,14 +3626,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } BUG_ON(!list_empty(&dev_priv->mm.active_list)); - BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); - BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); - BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); - BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); - BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); - BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); + for (i = 0; i < I915_NUM_RINGS; i++) { + BUG_ON(!list_empty(&dev_priv->ring[i].active_list)); + BUG_ON(!list_empty(&dev_priv->ring[i].request_list)); + } mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -3695,9 +3694,8 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); INIT_LIST_HEAD(&dev_priv->mm.gtt_list); - init_ring_lists(&dev_priv->render_ring); - init_ring_lists(&dev_priv->bsd_ring); - init_ring_lists(&dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + init_ring_lists(&dev_priv->ring[i]); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 9bdc495e17bb..6fc9cc485781 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -632,23 +632,59 @@ i915_gem_execbuffer_flush(struct drm_device *dev, uint32_t flush_rings) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; if (flush_domains & I915_GEM_DOMAIN_CPU) 
intel_gtt_chipset_flush(); if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { - if (flush_rings & RING_RENDER) - i915_gem_flush_ring(dev, &dev_priv->render_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BSD) - i915_gem_flush_ring(dev, &dev_priv->bsd_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BLT) - i915_gem_flush_ring(dev, &dev_priv->blt_ring, - invalidate_domains, flush_domains); + for (i = 0; i < I915_NUM_RINGS; i++) + if (flush_rings & (1 << i)) + i915_gem_flush_ring(dev, &dev_priv->ring[i], + invalidate_domains, + flush_domains); } } +static int +i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to) +{ + struct intel_ring_buffer *from = obj->ring; + u32 seqno; + int ret, idx; + + if (from == NULL || to == from) + return 0; + + if (INTEL_INFO(obj->base.dev)->gen < 6) + return i915_gem_object_wait_rendering(obj, true); + + idx = intel_ring_sync_index(from, to); + + seqno = obj->last_rendering_seqno; + if (seqno <= from->sync_seqno[idx]) + return 0; + + if (seqno == from->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + ret = i915_add_request(obj->base.dev, NULL, request, from); + if (ret) { + kfree(request); + return ret; + } + + seqno = request->seqno; + } + + from->sync_seqno[idx] = seqno; + return intel_ring_sync(to, from, seqno - 1); +} static int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, @@ -678,12 +714,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, } list_for_each_entry(obj, objects, exec_list) { - /* XXX replace with semaphores */ - if (obj->ring && ring != obj->ring) { - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; - } + ret = i915_gem_execbuffer_sync_rings(obj, ring); + if (ret) + return ret; } return 0; @@ -769,7 +802,8 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, static void i915_gem_execbuffer_move_to_active(struct list_head *objects, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring, + u32 seqno) { struct drm_i915_gem_object *obj; @@ -778,7 +812,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, obj->base.write_domain = obj->base.pending_write_domain; obj->fenced_gpu_access = obj->pending_fenced_gpu_access; - i915_gem_object_move_to_active(obj, ring); + i915_gem_object_move_to_active(obj, ring, seqno); if (obj->base.write_domain) { obj->dirty = 1; obj->pending_gpu_write = true; @@ -833,6 +867,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; u32 exec_start, exec_len; + u32 seqno; int ret, i; if (!i915_gem_check_execbuffer(args)) { @@ -851,21 +886,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, switch (args->flags & I915_EXEC_RING_MASK) { case I915_EXEC_DEFAULT: case I915_EXEC_RENDER: - ring = &dev_priv->render_ring; + ring = &dev_priv->ring[RCS]; break; case I915_EXEC_BSD: if (!HAS_BSD(dev)) { DRM_ERROR("execbuf with invalid ring (BSD)\n"); return -EINVAL; } - ring = &dev_priv->bsd_ring; + ring = &dev_priv->ring[VCS]; break; case I915_EXEC_BLT: if (!HAS_BLT(dev)) { DRM_ERROR("execbuf with invalid ring (BLT)\n"); return -EINVAL; } - ring = &dev_priv->blt_ring; + ring = &dev_priv->ring[BCS]; break; default: DRM_ERROR("execbuf with unknown ring: %d\n", @@ -879,7 +914,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, 
} if (args->num_cliprects != 0) { - if (ring != &dev_priv->render_ring) { + if (ring != &dev_priv->ring[RCS]) { DRM_ERROR("clip rectangles are only valid with the render ring\n"); return -EINVAL; } @@ -972,6 +1007,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto err; + seqno = i915_gem_next_request_seqno(dev, ring); + for (i = 0; i < I915_NUM_RINGS-1; i++) { + if (seqno < ring->sync_seqno[i]) { + /* The GPU can not handle its semaphore value wrapping, + * so every billion or so execbuffers, we need to stall + * the GPU in order to reset the counters. + */ + ret = i915_gpu_idle(dev); + if (ret) + goto err; + + BUG_ON(ring->sync_seqno[i]); + } + } + exec_start = batch_obj->gtt_offset + args->batch_start_offset; exec_len = args->batch_len; if (cliprects) { @@ -992,7 +1042,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - i915_gem_execbuffer_move_to_active(&objects, ring); + i915_gem_execbuffer_move_to_active(&objects, ring, seqno); i915_gem_execbuffer_retire_commands(dev, file, ring); err: diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9aa1e1dc5fd5..5e831b7eb3f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -67,9 +67,9 @@ void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->gt_irq_mask_reg & mask) != 0) { - dev_priv->gt_irq_mask_reg &= ~mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + if ((dev_priv->gt_irq_mask & mask) != 0) { + dev_priv->gt_irq_mask &= ~mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); } } @@ -77,9 +77,9 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->gt_irq_mask_reg & mask) != mask) { - dev_priv->gt_irq_mask_reg |= mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + if ((dev_priv->gt_irq_mask & mask) != mask) { + dev_priv->gt_irq_mask |= mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); } } @@ -88,9 +88,9 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) static void ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 0) { - dev_priv->irq_mask_reg &= ~mask; - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != 0) { + dev_priv->irq_mask &= ~mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } @@ -98,9 +98,9 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) static inline void ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != mask) { - dev_priv->irq_mask_reg |= mask; - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != mask) { + dev_priv->irq_mask |= mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } @@ -108,9 +108,9 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 0) { - dev_priv->irq_mask_reg &= ~mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != 0) { + dev_priv->irq_mask &= ~mask; + I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); } } @@ -118,9 +118,9 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 
mask) { - dev_priv->irq_mask_reg |= mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != mask) { + dev_priv->irq_mask |= mask; + I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); } } @@ -163,9 +163,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) /** * intel_enable_asle - enable ASLE interrupt for OpRegion */ -void intel_enable_asle (struct drm_device *dev) +void intel_enable_asle(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); @@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev) i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); } + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } /** @@ -344,12 +349,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_PIPE_NOTIFY) - notify_ring(dev, &dev_priv->render_ring); + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) - notify_ring(dev, &dev_priv->bsd_ring); - if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(dev, &dev_priv->blt_ring); + notify_ring(dev, &dev_priv->ring[VCS]); + if (gt_iir & GT_BLT_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[BCS]); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); @@ -640,8 +645,7 @@ static void i915_capture_error_state(struct drm_device *dev) DRM_DEBUG_DRIVER("generating error event\n"); - error->seqno = - dev_priv->render_ring.get_seqno(&dev_priv->render_ring); + error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); @@ -656,16 +660,16 @@ static void i915_capture_error_state(struct drm_device *dev) error->bcs_ipeir = I915_READ(BCS_IPEIR); error->bcs_instdone = I915_READ(BCS_INSTDONE); error->bcs_seqno = 0; - if (dev_priv->blt_ring.get_seqno) - error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring); + if (dev_priv->ring[BCS].get_seqno) + error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); error->vcs_acthd = I915_READ(VCS_ACTHD); error->vcs_ipehr = I915_READ(VCS_IPEHR); error->vcs_ipeir = I915_READ(VCS_IPEIR); error->vcs_instdone = I915_READ(VCS_INSTDONE); error->vcs_seqno = 0; - if (dev_priv->bsd_ring.get_seqno) - error->vcs_seqno = dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring); + if (dev_priv->ring[VCS].get_seqno) + error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); } if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); @@ -684,7 +688,7 @@ static void i915_capture_error_state(struct drm_device *dev) } i915_gem_record_fences(dev, error); - bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring); + bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]); /* Grab the current batchbuffer, most likely to have crashed. */ batchbuffer[0] = NULL; @@ -748,7 +752,7 @@ static void i915_capture_error_state(struct drm_device *dev) /* Record the ringbuffer */ error->ringbuffer = i915_error_object_create(dev, - dev_priv->render_ring.obj); + dev_priv->ring[RCS].obj); /* Record buffers on the active and pinned lists. 
*/ error->active_bo = NULL; @@ -949,11 +953,11 @@ void i915_handle_error(struct drm_device *dev, bool wedged) /* * Wakeup waiting processes so they don't hang */ - wake_up_all(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->ring[RCS].irq_queue); if (HAS_BSD(dev)) - wake_up_all(&dev_priv->bsd_ring.irq_queue); + wake_up_all(&dev_priv->ring[VCS].irq_queue); if (HAS_BLT(dev)) - wake_up_all(&dev_priv->blt_ring.irq_queue); + wake_up_all(&dev_priv->ring[BCS].irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -1035,7 +1039,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); pipea_stats = I915_READ(PIPEASTAT); pipeb_stats = I915_READ(PIPEBSTAT); @@ -1058,7 +1062,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) I915_WRITE(PIPEBSTAT, pipeb_stats); irq_received = 1; } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); if (!irq_received) break; @@ -1091,9 +1095,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) } if (iir & I915_USER_INTERRUPT) - notify_ring(dev, &dev_priv->render_ring); - if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) - notify_ring(dev, &dev_priv->bsd_ring); + notify_ring(dev, &dev_priv->ring[RCS]); + if (iir & I915_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); @@ -1180,10 +1184,10 @@ static int i915_emit_irq(struct drm_device * dev) void i915_trace_irq_get(struct drm_device *dev, u32 seqno) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); if (dev_priv->trace_irq_seqno == 0) - render_ring->user_irq_get(render_ring); + ring->irq_get(ring); dev_priv->trace_irq_seqno = seqno; } @@ -1193,7 +1197,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret = 0; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv)); @@ -1207,10 +1211,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - render_ring->user_irq_get(render_ring); - DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, + ring->irq_get(ring); + DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); - render_ring->user_irq_put(render_ring); + ring->irq_put(ring); if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", @@ -1229,7 +1233,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, drm_i915_irq_emit_t *emit = data; int result; - if (!dev_priv || !dev_priv->render_ring.virtual_start) { + if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } @@ -1275,9 +1279,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, @@ -1285,7 +1289,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } @@ -1297,15 +1301,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_START_VBLANK_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } void i915_enable_interrupt (struct drm_device *dev) @@ -1397,6 +1401,27 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) return false; } +static bool kick_ring(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) { + DRM_ERROR("Kicking stuck wait on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + if (IS_GEN6(dev) && + (tmp & RING_WAIT_SEMAPHORE)) { + DRM_ERROR("Kicking stuck semaphore on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + return false; +} + /** * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. The first time this is called we simply record @@ -1411,9 +1436,9 @@ void i915_hangcheck_elapsed(unsigned long data) bool err = false; /* If all work is done then ACTHD clearly hasn't advanced. */ - if (i915_hangcheck_ring_idle(&dev_priv->render_ring, &err) && - i915_hangcheck_ring_idle(&dev_priv->bsd_ring, &err) && - i915_hangcheck_ring_idle(&dev_priv->blt_ring, &err)) { + if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && + i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && + i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { dev_priv->hangcheck_count = 0; if (err) goto repeat; @@ -1442,12 +1467,17 @@ void i915_hangcheck_elapsed(unsigned long data) * and break the hang. This should work on * all but the second generation chipsets. 
*/ - struct intel_ring_buffer *ring = &dev_priv->render_ring; - u32 tmp = I915_READ_CTL(ring); - if (tmp & RING_WAIT) { - I915_WRITE_CTL(ring, tmp); + + if (kick_ring(&dev_priv->ring[RCS])) + goto repeat; + + if (HAS_BSD(dev) && + kick_ring(&dev_priv->ring[VCS])) + goto repeat; + + if (HAS_BLT(dev) && + kick_ring(&dev_priv->ring[BCS])) goto repeat; - } } i915_handle_error(dev, true); @@ -1498,37 +1528,37 @@ static int ironlake_irq_postinstall(struct drm_device *dev) /* enable kind of interrupts always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; - u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; + u32 render_irqs; u32 hotplug_mask; - dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; + dev_priv->irq_mask = ~display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); + I915_WRITE(DEIMR, dev_priv->irq_mask); + I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); POSTING_READ(DEIER); - if (IS_GEN6(dev)) { - render_mask = - GT_PIPE_NOTIFY | - GT_GEN6_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; - } - - dev_priv->gt_irq_mask_reg = ~render_mask; - dev_priv->gt_irq_enable_reg = render_mask; + dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); if (IS_GEN6(dev)) { - I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); - I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); + I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT); + I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT); I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); } - I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); + if (IS_GEN6(dev)) + render_irqs = + GT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_BLT_USER_INTERRUPT; + else + render_irqs = + GT_PIPE_NOTIFY | + GT_BSD_USER_INTERRUPT; + I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { @@ -1539,12 +1569,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev) SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; } - dev_priv->pch_irq_mask_reg = ~hotplug_mask; - dev_priv->pch_irq_enable_reg = hotplug_mask; + dev_priv->pch_irq_mask = ~hotplug_mask; I915_WRITE(SDEIIR, I915_READ(SDEIIR)); - I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); - I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); + I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); + I915_WRITE(SDEIER, hotplug_mask); POSTING_READ(SDEIER); if (IS_IRONLAKE_M(dev)) { @@ -1594,11 +1623,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev) u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; u32 error_mask; - DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); if (HAS_BSD(dev)) - DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); if (HAS_BLT(dev)) - DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; @@ -1606,7 +1635,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) return ironlake_irq_postinstall(dev); /* Unmask the interrupts that we always want on. 
*/ - dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; + dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; @@ -1615,7 +1644,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) /* Enable in IER... */ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ - dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; + dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; } /* @@ -1633,7 +1662,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) } I915_WRITE(EMR, error_mask); - I915_WRITE(IMR, dev_priv->irq_mask_reg); + I915_WRITE(IMR, dev_priv->irq_mask); I915_WRITE(IER, enable_mask); POSTING_READ(IER); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 06175e98c5bb..3e03094cf148 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -176,6 +176,11 @@ #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ +#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) +#define MI_SEMAPHORE_UPDATE (1<<21) +#define MI_SEMAPHORE_COMPARE (1<<20) +#define MI_SEMAPHORE_REGISTER (1<<18) /* * 3D instructions used by the kernel */ @@ -276,9 +281,12 @@ #define RING_HEAD(base) ((base)+0x34) #define RING_START(base) ((base)+0x38) #define RING_CTL(base) ((base)+0x3c) +#define RING_SYNC_0(base) ((base)+0x40) +#define RING_SYNC_1(base) ((base)+0x44) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) #define RING_ACTHD(base) ((base)+0x74) +#define RING_NOPID(base) ((base)+0x94) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 @@ -293,6 +301,7 @@ #define RING_INVALID 0x00000000 #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ +#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ #if 0 #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 @@ -347,6 +356,14 @@ # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 11) +#define GFX_MODE 0x02520 +#define GFX_RUN_LIST_ENABLE (1<<15) +#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) +#define GFX_SURFACE_FAULT_ENABLE (1<<12) +#define GFX_REPLAY_MODE (1<<11) +#define GFX_PSMI_GRANULARITY (1<<10) +#define GFX_PPGTT_ENABLE (1<<9) + #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 @@ -498,7 +515,7 @@ #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) #define GEN6_BSD_IMR 0x120a8 -#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) +#define GEN6_BSD_USER_INTERRUPT (1 << 12) #define GEN6_BSD_RNCID 0x12198 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3063edd2456f..0b6272a2edfc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1998,7 +1998,7 @@ static void intel_clear_scanline_wait(struct drm_device *dev) /* Can't break the hang on i8xx */ return; - ring = &dev_priv->render_ring; + ring = LP_RING(dev_priv); tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) I915_WRITE_CTL(ring, tmp); @@ -5124,7 +5124,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj, &dev_priv->render_ring); + ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) goto cleanup_work; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 
9b0d9a867aea..f295a7aaadf9 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev) struct opregion_asle *asle = dev_priv->opregion.asle; if (asle) { - if (IS_MOBILE(dev)) { - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (IS_MOBILE(dev)) intel_enable_asle(dev); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, - irqflags); - } asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d0c1add393a3..3fbb98b948d6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -221,7 +221,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; BUG_ON(overlay->last_flip_req); - ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); + ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); if (ret) { kfree(request); return ret; @@ -230,7 +230,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, overlay->flip_tail = tail; ret = i915_do_wait_request(dev, overlay->last_flip_req, true, - &dev_priv->render_ring); + LP_RING(dev_priv)); if (ret) return ret; @@ -364,7 +364,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, OUT_RING(flip_addr); ADVANCE_LP_RING(); - ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); + ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); if (ret) { kfree(request); return ret; @@ -454,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, return 0; ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, &dev_priv->render_ring); + interruptible, LP_RING(dev_priv)); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 21871b0766e2..f71db0cf4909 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -203,6 +203,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) if (ring->space < 0) ring->space += ring->size; } + return 0; } @@ -281,17 +282,18 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int ret = init_ring_common(ring); if (INTEL_INFO(dev)->gen > 3) { - drm_i915_private_t *dev_priv = dev->dev_private; int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; if (IS_GEN6(dev)) mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; I915_WRITE(MI_MODE, mode); } - if (HAS_PIPE_CONTROL(dev)) { + if (INTEL_INFO(dev)->gen >= 6) { + } else if (HAS_PIPE_CONTROL(dev)) { ret = init_pipe_control(ring); if (ret) return ret; @@ -308,6 +310,80 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) cleanup_pipe_control(ring); } +static void +update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int id; + + /* + * cs -> 1 = vcs, 0 = bcs + * vcs -> 1 = bcs, 0 = cs, + * bcs -> 1 = cs, 0 = vcs. 
+ */ + id = ring - dev_priv->ring; + id += 2 - i; + id %= 3; + + intel_ring_emit(ring, + MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_REGISTER | + MI_SEMAPHORE_UPDATE); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, + RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i); +} + +static int +gen6_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + u32 seqno; + int ret; + + ret = intel_ring_begin(ring, 10); + if (ret) + return ret; + + seqno = i915_gem_get_seqno(ring->dev); + update_semaphore(ring, 0, seqno); + update_semaphore(ring, 1, seqno); + + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + *result = seqno; + return 0; +} + +int +intel_ring_sync(struct intel_ring_buffer *ring, + struct intel_ring_buffer *to, + u32 seqno) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_REGISTER | + intel_ring_sync_index(ring, to) << 17 | + MI_SEMAPHORE_COMPARE); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + #define PIPE_CONTROL_FLUSH(ring__, addr__) \ do { \ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ @@ -317,131 +393,128 @@ do { \ intel_ring_emit(ring__, 0); \ } while (0) -/** - * Creates a new sequence number, emitting a write of it to the status page - * plus an interrupt, which will trigger i915_user_interrupt_handler. - * - * Must be called with struct_lock held. - * - * Returned sequence numbers are nonzero on success. - */ static int -render_ring_add_request(struct intel_ring_buffer *ring, - u32 *result) +pc_render_add_request(struct intel_ring_buffer *ring, + u32 *result) { struct drm_device *dev = ring->dev; u32 seqno = i915_gem_get_seqno(dev); struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; int ret; - if (IS_GEN6(dev)) { - ret = intel_ring_begin(ring, 6); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); - intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - } else if (HAS_PIPE_CONTROL(dev)) { - u32 scratch_addr = pc->gtt_offset + 128; + /* + * Workaround qword write incoherence by flushing the + * PIPE_NOTIFY buffers out to memory before requesting + * an interrupt. + */ + ret = intel_ring_begin(ring, 32); + if (ret) + return ret; - /* - * Workaround qword write incoherence by flushing the - * PIPE_NOTIFY buffers out to memory before requesting - * an interrupt. 
- */ - ret = intel_ring_begin(ring, 32); - if (ret) - return ret; + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - } else { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + *result = seqno; + return 0; +} - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); +static int +render_ring_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + struct drm_device *dev = ring->dev; + u32 seqno = i915_gem_get_seqno(dev); + int ret; - intel_ring_emit(ring, MI_USER_INTERRUPT); - } + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_advance(ring); + *result = seqno; return 0; } static u32 -render_ring_get_seqno(struct intel_ring_buffer *ring) +ring_get_seqno(struct intel_ring_buffer *ring) { - struct drm_device *dev = ring->dev; - if (HAS_PIPE_CONTROL(dev)) { - struct pipe_control *pc = ring->private; - return pc->cpu_page[0]; - } else - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static u32 +pc_render_get_seqno(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + return pc->cpu_page[0]; } static void -render_ring_get_user_irq(struct intel_ring_buffer *ring) +render_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - 
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { + if (dev->irq_enabled && ++ring->irq_refcount == 1) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (HAS_PCH_SPLIT(dev)) - ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + ironlake_enable_graphics_irq(dev_priv, + GT_PIPE_NOTIFY | GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } static void -render_ring_put_user_irq(struct intel_ring_buffer *ring) +render_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); - if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { + BUG_ON(dev->irq_enabled && ring->irq_refcount == 0); + if (dev->irq_enabled && --ring->irq_refcount == 0) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + ironlake_disable_graphics_irq(dev_priv, + GT_USER_INTERRUPT | + GT_PIPE_NOTIFY); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } void intel_ring_setup_status_page(struct intel_ring_buffer *ring) @@ -459,6 +532,9 @@ bsd_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; + if (intel_ring_begin(ring, 2) == 0) { intel_ring_emit(ring, MI_FLUSH); intel_ring_emit(ring, MI_NOOP); @@ -491,20 +567,45 @@ ring_add_request(struct intel_ring_buffer *ring, } static void -bsd_ring_get_user_irq(struct intel_ring_buffer *ring) +ring_get_irq(struct intel_ring_buffer *ring, u32 flag) { - /* do nothing */ + struct drm_device *dev = ring->dev; + + if (dev->irq_enabled && ++ring->irq_refcount == 1) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_enable_graphics_irq(dev_priv, flag); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } } + static void -bsd_ring_put_user_irq(struct intel_ring_buffer *ring) +ring_put_irq(struct intel_ring_buffer *ring, u32 flag) { - /* do nothing */ + struct drm_device *dev = ring->dev; + + if (dev->irq_enabled && --ring->irq_refcount == 0) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_disable_graphics_irq(dev_priv, flag); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } } -static u32 -ring_status_page_get_seqno(struct intel_ring_buffer *ring) + +static void +bsd_ring_get_irq(struct intel_ring_buffer *ring) { - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + ring_get_irq(ring, GT_BSD_USER_INTERRUPT); +} +static void +bsd_ring_put_irq(struct intel_ring_buffer *ring) +{ + ring_put_irq(ring, GT_BSD_USER_INTERRUPT); } static int @@ -817,9 +918,9 @@ static const struct intel_ring_buffer render_ring = { .write_tail = ring_write_tail, .flush = 
render_ring_flush, .add_request = render_ring_add_request, - .get_seqno = render_ring_get_seqno, - .user_irq_get = render_ring_get_user_irq, - .user_irq_put = render_ring_put_user_irq, + .get_seqno = ring_get_seqno, + .irq_get = render_ring_get_irq, + .irq_put = render_ring_put_irq, .dispatch_execbuffer = render_ring_dispatch_execbuffer, .cleanup = render_ring_cleanup, }; @@ -835,9 +936,9 @@ static const struct intel_ring_buffer bsd_ring = { .write_tail = ring_write_tail, .flush = bsd_ring_flush, .add_request = ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = bsd_ring_get_user_irq, - .user_irq_put = bsd_ring_put_user_irq, + .get_seqno = ring_get_seqno, + .irq_get = bsd_ring_get_irq, + .irq_put = bsd_ring_put_irq, .dispatch_execbuffer = ring_dispatch_execbuffer, }; @@ -868,6 +969,9 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; + if (intel_ring_begin(ring, 4) == 0) { intel_ring_emit(ring, MI_FLUSH_DW); intel_ring_emit(ring, 0); @@ -895,33 +999,46 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, return 0; } +static void +gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) +{ + ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); +} + +static void +gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) +{ + ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); +} + /* ring buffer for Video Codec for Gen6+ */ static const struct intel_ring_buffer gen6_bsd_ring = { - .name = "gen6 bsd ring", - .id = RING_BSD, - .mmio_base = GEN6_BSD_RING_BASE, - .size = 32 * PAGE_SIZE, - .init = init_ring_common, - .write_tail = gen6_bsd_ring_write_tail, - .flush = gen6_ring_flush, - .add_request = ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = bsd_ring_get_user_irq, - .user_irq_put = bsd_ring_put_user_irq, - .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, + .name = "gen6 bsd ring", + .id = RING_BSD, + .mmio_base = GEN6_BSD_RING_BASE, + .size = 32 * PAGE_SIZE, + .init = init_ring_common, + .write_tail = gen6_bsd_ring_write_tail, + .flush = gen6_ring_flush, + .add_request = gen6_add_request, + .get_seqno = ring_get_seqno, + .irq_get = gen6_bsd_ring_get_irq, + .irq_put = gen6_bsd_ring_put_irq, + .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, }; /* Blitter support (SandyBridge+) */ static void -blt_ring_get_user_irq(struct intel_ring_buffer *ring) +blt_ring_get_irq(struct intel_ring_buffer *ring) { - /* do nothing */ + ring_get_irq(ring, GT_BLT_USER_INTERRUPT); } + static void -blt_ring_put_user_irq(struct intel_ring_buffer *ring) +blt_ring_put_irq(struct intel_ring_buffer *ring) { - /* do nothing */ + ring_put_irq(ring, GT_BLT_USER_INTERRUPT); } @@ -994,6 +1111,9 @@ static void blt_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; + if (blt_ring_begin(ring, 4) == 0) { intel_ring_emit(ring, MI_FLUSH_DW); intel_ring_emit(ring, 0); @@ -1003,30 +1123,6 @@ static void blt_ring_flush(struct intel_ring_buffer *ring, } } -static int -blt_ring_add_request(struct intel_ring_buffer *ring, - u32 *result) -{ - u32 seqno; - int ret; - - ret = blt_ring_begin(ring, 4); - if (ret) - return ret; - - seqno = i915_gem_get_seqno(ring->dev); - - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 
MI_USER_INTERRUPT); - intel_ring_advance(ring); - - DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); - *result = seqno; - return 0; -} - static void blt_ring_cleanup(struct intel_ring_buffer *ring) { if (!ring->private) @@ -1045,10 +1141,10 @@ static const struct intel_ring_buffer gen6_blt_ring = { .init = blt_ring_init, .write_tail = ring_write_tail, .flush = blt_ring_flush, - .add_request = blt_ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = blt_ring_get_user_irq, - .user_irq_put = blt_ring_put_user_irq, + .add_request = gen6_add_request, + .get_seqno = ring_get_seqno, + .irq_get = blt_ring_get_irq, + .irq_put = blt_ring_put_irq, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, .cleanup = blt_ring_cleanup, }; @@ -1056,36 +1152,43 @@ static const struct intel_ring_buffer gen6_blt_ring = { int intel_init_render_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - dev_priv->render_ring = render_ring; + *ring = render_ring; + if (INTEL_INFO(dev)->gen >= 6) { + ring->add_request = gen6_add_request; + } else if (HAS_PIPE_CONTROL(dev)) { + ring->add_request = pc_render_add_request; + ring->get_seqno = pc_render_get_seqno; + } if (!I915_NEED_GFX_HWS(dev)) { - dev_priv->render_ring.status_page.page_addr - = dev_priv->status_page_dmah->vaddr; - memset(dev_priv->render_ring.status_page.page_addr, - 0, PAGE_SIZE); + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); } - return intel_init_ring_buffer(dev, &dev_priv->render_ring); + return intel_init_ring_buffer(dev, ring); } int intel_init_bsd_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; if (IS_GEN6(dev)) - dev_priv->bsd_ring = gen6_bsd_ring; + *ring = gen6_bsd_ring; else - dev_priv->bsd_ring = bsd_ring; + *ring = bsd_ring; - return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); + return intel_init_ring_buffer(dev, ring); } int intel_init_blt_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; - dev_priv->blt_ring = gen6_blt_ring; + *ring = gen6_blt_ring; - return intel_init_ring_buffer(dev, &dev_priv->blt_ring); + return intel_init_ring_buffer(dev, ring); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8e3526777926..6a3822bc6af2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -1,6 +1,13 @@ #ifndef _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_ +enum { + RCS = 0x0, + VCS, + BCS, + I915_NUM_RINGS, +}; + struct intel_hw_status_page { u32 __iomem *page_addr; unsigned int gfx_addr; @@ -21,7 +28,10 @@ struct intel_hw_status_page { #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) -struct drm_i915_gem_execbuffer2; +#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base)) +#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base)) +#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base)) + struct intel_ring_buffer { const char *name; enum intel_ring_id { @@ -42,9 +52,10 @@ struct intel_ring_buffer { u32 irq_seqno; /* last seq seem at irq time */ u32 waiting_seqno; - int user_irq_refcount; - void (*user_irq_get)(struct 
intel_ring_buffer *ring); - void (*user_irq_put)(struct intel_ring_buffer *ring); + u32 sync_seqno[I915_NUM_RINGS-1]; + u32 irq_refcount; + void (*irq_get)(struct intel_ring_buffer *ring); + void (*irq_put)(struct intel_ring_buffer *ring); int (*init)(struct intel_ring_buffer *ring); @@ -98,6 +109,25 @@ struct intel_ring_buffer { void *private; }; +static inline u32 +intel_ring_sync_index(struct intel_ring_buffer *ring, + struct intel_ring_buffer *other) +{ + int idx; + + /* + * cs -> 0 = vcs, 1 = bcs + * vcs -> 0 = bcs, 1 = cs, + * bcs -> 0 = cs, 1 = vcs. + */ + + idx = (other - ring) - 1; + if (idx < 0) + idx += I915_NUM_RINGS; + + return idx; +} + static inline u32 intel_read_status_page(struct intel_ring_buffer *ring, int reg) @@ -119,6 +149,9 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring, void intel_ring_advance(struct intel_ring_buffer *ring); u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); +int intel_ring_sync(struct intel_ring_buffer *ring, + struct intel_ring_buffer *to, + u32 seqno); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 2f7681989316..93206e4eaa6f 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) int type; /* Disable TV interrupts around load detect or we'll recurse */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); save_tv_dac = tv_dac = I915_READ(TV_DAC); save_tv_ctl = tv_ctl = I915_READ(TV_CTL); @@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) I915_WRITE(TV_CTL, save_tv_ctl); /* Restore interrupt config */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return type; } -- cgit v1.2.3 From 88f23b8fa3e6357c423af24ec31c661fc12f884b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Dec 2010 15:08:31 +0000 Subject: drm/i915: Avoid using PIPE_CONTROL on Ironlake The workaround is hideous, and we are using STORE_DWORD on all other generations and on all other rings, so use it for the gen5 render ring as well.
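For readers skimming the diff: the STORE_DWORD breadcrumb that replaces PIPE_CONTROL is only four dwords. A condensed sketch of render_ring_add_request() as it stands after this patch, assembled from the hunks below rather than copied verbatim from the final file:

    static int
    render_ring_add_request(struct intel_ring_buffer *ring, u32 *result)
    {
        u32 seqno = i915_gem_get_seqno(ring->dev);
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
            return ret;

        /* Post the new seqno into the hardware status page... */
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        /* ...then raise MI_USER_INTERRUPT to wake any waiters. */
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
    }

Waiters then read the value back through ring_get_seqno(), i.e. intel_read_status_page(ring, I915_GEM_HWS_INDEX), so the render ring on gen5 behaves exactly like the BSD and blitter rings.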
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 4 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 159 +------------------------------- 2 files changed, 4 insertions(+), 159 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5e831b7eb3f1..02e4dd82f754 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -349,7 +349,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + if (gt_iir & GT_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) notify_ring(dev, &dev_priv->ring[VCS]); @@ -1556,7 +1556,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) GT_BLT_USER_INTERRUPT; else render_irqs = - GT_PIPE_NOTIFY | + GT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f71db0cf4909..0ee78525959a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -207,78 +207,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) return 0; } -/* - * 965+ support PIPE_CONTROL commands, which provide finer grained control - * over cache flushing. - */ -struct pipe_control { - struct drm_i915_gem_object *obj; - volatile u32 *cpu_page; - u32 gtt_offset; -}; - -static int -init_pipe_control(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc; - struct drm_i915_gem_object *obj; - int ret; - - if (ring->private) - return 0; - - pc = kmalloc(sizeof(*pc), GFP_KERNEL); - if (!pc) - return -ENOMEM; - - obj = i915_gem_alloc_object(ring->dev, 4096); - if (obj == NULL) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; - goto err; - } - obj->agp_type = AGP_USER_CACHED_MEMORY; - - ret = i915_gem_object_pin(obj, 4096, true); - if (ret) - goto err_unref; - - pc->gtt_offset = obj->gtt_offset; - pc->cpu_page = kmap(obj->pages[0]); - if (pc->cpu_page == NULL) - goto err_unpin; - - pc->obj = obj; - ring->private = pc; - return 0; - -err_unpin: - i915_gem_object_unpin(obj); -err_unref: - drm_gem_object_unreference(&obj->base); -err: - kfree(pc); - return ret; -} - -static void -cleanup_pipe_control(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc = ring->private; - struct drm_i915_gem_object *obj; - - if (!ring->private) - return; - - obj = pc->obj; - kunmap(obj->pages[0]); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - - kfree(pc); - ring->private = NULL; -} - static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -292,24 +220,9 @@ static int init_render_ring(struct intel_ring_buffer *ring) I915_WRITE(MI_MODE, mode); } - if (INTEL_INFO(dev)->gen >= 6) { - } else if (HAS_PIPE_CONTROL(dev)) { - ret = init_pipe_control(ring); - if (ret) - return ret; - } - return ret; } -static void render_ring_cleanup(struct intel_ring_buffer *ring) -{ - if (!ring->private) - return; - - cleanup_pipe_control(ring); -} - static void update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) { @@ -384,62 +297,6 @@ intel_ring_sync(struct intel_ring_buffer *ring, return 0; } -#define PIPE_CONTROL_FLUSH(ring__, addr__) \ -do { \ - intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ - PIPE_CONTROL_DEPTH_STALL | 2); \ - intel_ring_emit(ring__, 
(addr__) | PIPE_CONTROL_GLOBAL_GTT); \ - intel_ring_emit(ring__, 0); \ - intel_ring_emit(ring__, 0); \ -} while (0) - -static int -pc_render_add_request(struct intel_ring_buffer *ring, - u32 *result) -{ - struct drm_device *dev = ring->dev; - u32 seqno = i915_gem_get_seqno(dev); - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; - int ret; - - /* - * Workaround qword write incoherence by flushing the - * PIPE_NOTIFY buffers out to memory before requesting - * an interrupt. - */ - ret = intel_ring_begin(ring, 32); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - - *result = seqno; - return 0; -} - static int render_ring_add_request(struct intel_ring_buffer *ring, u32 *result) @@ -468,13 +325,6 @@ ring_get_seqno(struct intel_ring_buffer *ring) return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } -static u32 -pc_render_get_seqno(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc = ring->private; - return pc->cpu_page[0]; -} - static void render_ring_get_irq(struct intel_ring_buffer *ring) { @@ -488,7 +338,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, - GT_PIPE_NOTIFY | GT_USER_INTERRUPT); + GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); @@ -509,8 +359,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_disable_graphics_irq(dev_priv, - GT_USER_INTERRUPT | - GT_PIPE_NOTIFY); + GT_USER_INTERRUPT); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -922,7 +771,6 @@ static const struct intel_ring_buffer render_ring = { .irq_get = render_ring_get_irq, .irq_put = render_ring_put_irq, .dispatch_execbuffer = render_ring_dispatch_execbuffer, - .cleanup = render_ring_cleanup, }; /* ring buffer for bit-stream decoder */ @@ -1157,9 +1005,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev) *ring = render_ring; if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; - } else if (HAS_PIPE_CONTROL(dev)) { - ring->add_request = pc_render_add_request; - ring->get_seqno = pc_render_get_seqno; } if (!I915_NEED_GFX_HWS(dev)) { -- cgit v1.2.3 From b13c2b96bf15b9dd0f1a45fd788f3a3025c5aec6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Dec 2010 16:54:50 +0000 Subject: drm/i915/ringbuffer: Make IRQ refcnting atomic In order to enforce the correct memory barriers for irq get/put, we need to perform the actual counting using atomic operations. 
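The shape of the conversion, distilled from the hunks below: the refcount transition is decided atomically, only the 0->1 and 1->0 transitions take the spinlock to touch the hardware mask, and irq_get now reports whether the interrupt was actually armed. A sketch of the shared helper pair:

    static bool
    ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
    {
        struct drm_device *dev = ring->dev;

        if (!dev->irq_enabled)
            return false;

        /* Arm the hardware interrupt only on the first reference. */
        if (atomic_inc_return(&ring->irq_refcount) == 1) {
            drm_i915_private_t *dev_priv = dev->dev_private;
            unsigned long irqflags;

            spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
            ironlake_enable_graphics_irq(dev_priv, flag);
            spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }

        return true;
    }

    static void
    ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
    {
        struct drm_device *dev = ring->dev;

        /* Disarm on the last reference; the value-returning atomics
         * supply the memory barriers the commit message refers to. */
        if (atomic_dec_and_test(&ring->irq_refcount)) {
            drm_i915_private_t *dev_priv = dev->dev_private;
            unsigned long irqflags;

            spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
            ironlake_disable_graphics_irq(dev_priv, flag);
            spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
    }

Callers must now cope with irq_get() failing, as i915_do_wait_request() and i915_wait_irq() do in the hunks below: if (ring->irq_get(ring)) { wait...; ring->irq_put(ring); } else fall back to -ENODEV.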
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 37 ++++++++++++++------------- drivers/gpu/drm/i915/i915_irq.c | 17 +++++++------ drivers/gpu/drm/i915/intel_ringbuffer.c | 44 +++++++++++++++++++-------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +-- 4 files changed, 56 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 27fa2a1b26a5..726c2ccd674c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2000,17 +2000,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); ring->waiting_seqno = seqno; - ring->irq_get(ring); - if (interruptible) - ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); - else - wait_event(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); + ret = -ENODEV; + if (ring->irq_get(ring)) { + if (interruptible) + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + else + wait_event(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); - ring->irq_put(ring); + ring->irq_put(ring); + } ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); @@ -3157,14 +3159,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) * generation is designed to be run atomically and so is * lockless. */ - ring->irq_get(ring); - ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); - ring->irq_put(ring); + if (ring->irq_get(ring)) { + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + ring->irq_put(ring); - if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) - ret = -EIO; + if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) + ret = -EIO; + } } if (ret == 0) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 02e4dd82f754..2ddb98b5c90f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1186,10 +1186,9 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); - if (dev_priv->trace_irq_seqno == 0) - ring->irq_get(ring); - - dev_priv->trace_irq_seqno = seqno; + if (dev_priv->trace_irq_seqno == 0 && + ring->irq_get(ring)) + dev_priv->trace_irq_seqno = seqno; } static int i915_wait_irq(struct drm_device * dev, int irq_nr) @@ -1211,10 +1210,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - ring->irq_get(ring); - DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, - READ_BREADCRUMB(dev_priv) >= irq_nr); - ring->irq_put(ring); + ret = -ENODEV; + if (ring->irq_get(ring)) { + DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, + READ_BREADCRUMB(dev_priv) >= irq_nr); + ring->irq_put(ring); + } if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 74b99718a1fb..a3fd993e0de0 100644 --- 
a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -327,25 +327,28 @@ ring_get_seqno(struct intel_ring_buffer *ring) return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } -static void +static bool render_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - if (dev->irq_enabled && ++ring->irq_refcount == 1) { + if (!dev->irq_enabled) + return false; + + if (atomic_inc_return(&ring->irq_refcount) == 1) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } + + return true; } static void @@ -353,8 +356,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - BUG_ON(dev->irq_enabled && ring->irq_refcount == 0); - if (dev->irq_enabled && --ring->irq_refcount == 0) { + if (atomic_dec_and_test(&ring->irq_refcount)) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; @@ -417,12 +419,15 @@ ring_add_request(struct intel_ring_buffer *ring, return 0; } -static void +static bool ring_get_irq(struct intel_ring_buffer *ring, u32 flag) { struct drm_device *dev = ring->dev; - if (dev->irq_enabled && ++ring->irq_refcount == 1) { + if (!dev->irq_enabled) + return false; + + if (atomic_inc_return(&ring->irq_refcount) == 1) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; @@ -430,6 +435,8 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag) ironlake_enable_graphics_irq(dev_priv, flag); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } + + return true; } static void @@ -437,7 +444,7 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag) { struct drm_device *dev = ring->dev; - if (dev->irq_enabled && --ring->irq_refcount == 0) { + if (atomic_dec_and_test(&ring->irq_refcount)) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; @@ -447,16 +454,15 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag) } } - -static void +static bool bsd_ring_get_irq(struct intel_ring_buffer *ring) { - ring_get_irq(ring, GT_BSD_USER_INTERRUPT); + return ring_get_irq(ring, GT_BSD_USER_INTERRUPT); } static void bsd_ring_put_irq(struct intel_ring_buffer *ring) { - ring_put_irq(ring, GT_BSD_USER_INTERRUPT); + ring_put_irq(ring, GT_BSD_USER_INTERRUPT); } static int @@ -846,16 +852,16 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, return 0; } -static void +static bool gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) { - ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); + return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); } static void gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) { - ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); + ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); } /* ring buffer for Video Codec for Gen6+ */ @@ -876,16 +882,16 @@ static const struct intel_ring_buffer gen6_bsd_ring = { /* Blitter support (SandyBridge+) */ -static void +static bool blt_ring_get_irq(struct intel_ring_buffer *ring) { - ring_get_irq(ring, GT_BLT_USER_INTERRUPT); + return ring_get_irq(ring, GT_BLT_USER_INTERRUPT); } static void blt_ring_put_irq(struct intel_ring_buffer *ring) { - ring_put_irq(ring, GT_BLT_USER_INTERRUPT); + ring_put_irq(ring, GT_BLT_USER_INTERRUPT); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9652e4600b5e..8e2e357ad6ee 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -54,8 +54,8 @@ struct intel_ring_buffer { u32 irq_seqno; /* last seq seem at irq time */ u32 waiting_seqno; u32 sync_seqno[I915_NUM_RINGS-1]; - u32 irq_refcount; - void (*irq_get)(struct intel_ring_buffer *ring); + atomic_t irq_refcount; + bool __must_check (*irq_get)(struct intel_ring_buffer *ring); void (*irq_put)(struct intel_ring_buffer *ring); int (*init)(struct intel_ring_buffer *ring); -- cgit v1.2.3 From c6df541c00e53a4fdff7a130d4365f848075adcc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Dec 2010 09:56:50 +0000 Subject: Revert "drm/i915: Avoid using PIPE_CONTROL on Ironlake" Restore PIPE_CONTROL once again just for Ironlake, as it appears that MI_USER_INTERRUPT does not have the same coherency guarantees; that is, on Ironlake the interrupt following a GPU write is not guaranteed to arrive after the write is coherent from the CPU, as it is on the other generations. Reported-by: Zhenyu Wang Reported-by: Shuang He Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32402 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 3 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 162 +++++++++++++++++++++++++++++++- 2 files changed, 162 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2ddb98b5c90f..e4a2e2c3dbe3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -349,7 +349,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_USER_INTERRUPT) + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) notify_ring(dev, &dev_priv->ring[VCS]); @@ -1558,6 +1558,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) else render_irqs = GT_USER_INTERRUPT | + GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a3fd993e0de0..56bc95c056dd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -209,6 +209,78 @@ static int init_ring_common(struct intel_ring_buffer *ring) return 0; } +/* + * 965+ support PIPE_CONTROL commands, which provide finer grained control + * over cache flushing.
+ */ +struct pipe_control { + struct drm_i915_gem_object *obj; + volatile u32 *cpu_page; + u32 gtt_offset; +}; + +static int +init_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc; + struct drm_i915_gem_object *obj; + int ret; + + if (ring->private) + return 0; + + pc = kmalloc(sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + obj = i915_gem_alloc_object(ring->dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; + goto err; + } + obj->agp_type = AGP_USER_CACHED_MEMORY; + + ret = i915_gem_object_pin(obj, 4096, true); + if (ret) + goto err_unref; + + pc->gtt_offset = obj->gtt_offset; + pc->cpu_page = kmap(obj->pages[0]); + if (pc->cpu_page == NULL) + goto err_unpin; + + pc->obj = obj; + ring->private = pc; + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(&obj->base); +err: + kfree(pc); + return ret; +} + +static void +cleanup_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + struct drm_i915_gem_object *obj; + + if (!ring->private) + return; + + obj = pc->obj; + kunmap(obj->pages[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + + kfree(pc); + ring->private = NULL; +} + static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -222,9 +294,24 @@ static int init_render_ring(struct intel_ring_buffer *ring) I915_WRITE(MI_MODE, mode); } + if (INTEL_INFO(dev)->gen >= 6) { + } else if (IS_GEN5(dev)) { + ret = init_pipe_control(ring); + if (ret) + return ret; + } + return ret; } +static void render_ring_cleanup(struct intel_ring_buffer *ring) +{ + if (!ring->private) + return; + + cleanup_pipe_control(ring); +} + static void update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) { @@ -299,6 +386,65 @@ intel_ring_sync(struct intel_ring_buffer *ring, return 0; } +#define PIPE_CONTROL_FLUSH(ring__, addr__) \ +do { \ + intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ + PIPE_CONTROL_DEPTH_STALL | 2); \ + intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ + intel_ring_emit(ring__, 0); \ + intel_ring_emit(ring__, 0); \ +} while (0) + +static int +pc_render_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + struct drm_device *dev = ring->dev; + u32 seqno = i915_gem_get_seqno(dev); + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; + + /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently + * incoherent with writes to memory, i.e. completely fubar, + * so we need to use PIPE_NOTIFY instead. + * + * However, we also need to workaround the qword write + * incoherence by flushing the 6 PIPE_NOTIFY buffers out to + * memory before requesting an interrupt. 
+ */ + ret = intel_ring_begin(ring, 32); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + *result = seqno; + return 0; +} + static int render_ring_add_request(struct intel_ring_buffer *ring, u32 *result) @@ -327,6 +473,13 @@ ring_get_seqno(struct intel_ring_buffer *ring) return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } +static u32 +pc_render_get_seqno(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + return pc->cpu_page[0]; +} + static bool render_ring_get_irq(struct intel_ring_buffer *ring) { @@ -342,7 +495,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, - GT_USER_INTERRUPT); + GT_PIPE_NOTIFY | GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -363,7 +516,8 @@ render_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_disable_graphics_irq(dev_priv, - GT_USER_INTERRUPT); + GT_USER_INTERRUPT | + GT_PIPE_NOTIFY); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -776,6 +930,7 @@ static const struct intel_ring_buffer render_ring = { .irq_get = render_ring_get_irq, .irq_put = render_ring_put_irq, .dispatch_execbuffer = render_ring_dispatch_execbuffer, + .cleanup = render_ring_cleanup, }; /* ring buffer for bit-stream decoder */ @@ -1010,6 +1165,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev) *ring = render_ring; if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; + } else if (IS_GEN5(dev)) { + ring->add_request = pc_render_add_request; + ring->get_seqno = pc_render_get_seqno; } if (!I915_NEED_GFX_HWS(dev)) { -- cgit v1.2.3 From 0af7e4dff50454905092d468e91c1ef92e10e6b4 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 8 Dec 2010 04:07:19 +0100 Subject: drm/i915: Add support for precise vblank timestamping (v2) v2: Change IS_IRONLAKE to IS_GEN5 to adapt to 2.6.37 This patch adds new functions for use by the drm core: .get_vblank_timestamp() provides a precise timestamp for the end of the most recent (or current) vblank interval of a given crtc, as needed for the DRI2 implementation of the OML_sync_control extension. It is a thin wrapper around the drm function drm_calc_vbltimestamp_from_scanoutpos() which does almost all the work. 
.get_scanout_position() provides the current horizontal and vertical video scanout position and "in vblank" status of a given crtc, as needed by the drm for use by drm_calc_vbltimestamp_from_scanoutpos(). The patch modifies the pageflip completion routine to use these precise vblank timestamps as the timestamps for pageflip completion events. This code has only been tested on an HP Mini netbook with an Atom processor and an Intel 945GME GPU. The codepath for (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) GPUs has not been tested so far due to lack of hardware. Signed-off-by: Mario Kleiner Acked-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 2 + drivers/gpu/drm/i915/i915_drv.h | 7 +++ drivers/gpu/drm/i915/i915_irq.c | 86 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 29 +++++++++--- 5 files changed, 119 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index bdb29b2a01ed..9eee6cf7901e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -652,6 +652,8 @@ static struct drm_driver driver = { .device_is_agp = i915_driver_device_is_agp, .enable_vblank = i915_enable_vblank, .disable_vblank = i915_disable_vblank, + .get_vblank_timestamp = i915_get_vblank_timestamp, + .get_scanout_position = i915_get_crtc_scanoutpos, .irq_preinstall = i915_driver_irq_preinstall, .irq_postinstall = i915_driver_irq_postinstall, .irq_uninstall = i915_driver_irq_uninstall, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 30780f2cab6f..53dfc8398a96 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1019,6 +1019,13 @@ void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void intel_enable_asle (struct drm_device *dev); +int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags); + +int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, + int *vpos, int *hpos); #ifdef CONFIG_DEBUG_FS extern void i915_destroy_error_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e4a2e2c3dbe3..adf983f01dda 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -248,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) return I915_READ(reg); } +int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, + int *vpos, int *hpos) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 vbl = 0, position = 0; + int vbl_start, vbl_end, htotal, vtotal; + bool in_vbl = true; + int ret = 0; + + if (!i915_pipe_enabled(dev, pipe)) { + DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " + "pipe %d\n", pipe); + return 0; + } + + /* Get vtotal. */ + vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); + + if (INTEL_INFO(dev)->gen >= 4) { + /* No obvious pixelcount register. Only query vertical + * scanout position from Display scan line register. + */ + position = I915_READ(PIPEDSL(pipe)); + + /* Decode into vertical scanout position. Don't have + * horizontal scanout position. + */ + *vpos = position & 0x1fff; + *hpos = 0; + } else { + /* Have access to pixelcount since start of frame. + * We can split this into vertical and horizontal + * scanout position.
+ */ + position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + + htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); + *vpos = position / htotal; + *hpos = position - (*vpos * htotal); + } + + /* Query vblank area. */ + vbl = I915_READ(VBLANK(pipe)); + + /* Test position against vblank region. */ + vbl_start = vbl & 0x1fff; + vbl_end = (vbl >> 16) & 0x1fff; + + if ((*vpos < vbl_start) || (*vpos > vbl_end)) + in_vbl = false; + + /* Inside "upper part" of vblank area? Apply corrective offset: */ + if (in_vbl && (*vpos >= vbl_start)) + *vpos = *vpos - vtotal; + + /* Readouts valid? */ + if (vbl > 0) + ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_INVBL; + + return ret; +} + +int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags) +{ + struct drm_crtc *drmcrtc; + + if (crtc < 0 || crtc >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + /* Get drm_crtc to timestamp: */ + drmcrtc = intel_get_crtc_for_pipe(dev, crtc); + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, + vblank_time, flags, drmcrtc); +} + /* * Handle hotplug events outside the interrupt handler proper. */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 820e9dfaadc7..c2231f7d2d97 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2253,6 +2253,7 @@ #define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) +#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL) #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8645a974a499..0c201d684584 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5252,7 +5252,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) } static void do_intel_finish_page_flip(struct drm_device *dev, - struct drm_crtc *crtc) + struct drm_crtc *crtc, + int called_before_vblirq) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5274,19 +5275,33 @@ static void do_intel_finish_page_flip(struct drm_device *dev, } intel_crtc->unpin_work = NULL; - drm_vblank_put(dev, intel_crtc->pipe); if (work->event) { e = work->event; - do_gettimeofday(&now); - e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); + e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &now); + + /* Called before vblank count and timestamps have + * been updated for the vblank interval of flip + * completion? Need to increment vblank count and + * add one videorefresh duration to returned timestamp + * to account for this. 
+ */ + if (called_before_vblirq) { + e->event.sequence++; + now = ns_to_timeval(timeval_to_ns(&now) + + crtc->framedur_ns); + } + e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); } + drm_vblank_put(dev, intel_crtc->pipe); + spin_unlock_irqrestore(&dev->event_lock, flags); obj = work->old_fb_obj; @@ -5306,7 +5321,8 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - do_intel_finish_page_flip(dev, crtc); + /* Called after drm_handle_vblank has run for finish vblank. */ + do_intel_finish_page_flip(dev, crtc, 0); } void intel_finish_page_flip_plane(struct drm_device *dev, int plane) @@ -5314,7 +5330,8 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - do_intel_finish_page_flip(dev, crtc); + /* Called before drm_handle_vblank has run for finish vblank. */ + do_intel_finish_page_flip(dev, crtc, 1); } void intel_prepare_page_flip(struct drm_device *dev, int plane) -- cgit v1.2.3 From 3b8d8d91d51c7d15cda51052624169edf7b6dbc6 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 17 Dec 2010 14:19:02 -0800 Subject: drm/i915: dynamic render p-state support for Sandy Bridge Add an interrupt handler for switching graphics frequencies and handling PM interrupts. This should allow for increased performance when busy and lower power consumption when idle. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 54 ++++++++++++++++++++++++++++++------ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 47 +++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_reg.h | 8 +++++- drivers/gpu/drm/i915/i915_suspend.c | 9 ++++-- drivers/gpu/drm/i915/intel_display.c | 36 ++++++++++++++++++++---- drivers/gpu/drm/i915/intel_drv.h | 2 ++ 7 files changed, 137 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 864e75d762e6..92f75782c332 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -797,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); - u16 rgvstat = I915_READ16(MEMSTAT_ILK); - - seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); - seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); - seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> - MEMSTAT_VID_SHIFT); - seq_printf(m, "Current P-state: %d\n", - (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + + if (IS_GEN5(dev)) { + u16 rgvswctl = I915_READ16(MEMSWCTL); + u16 rgvstat = I915_READ16(MEMSTAT_ILK); + + seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); + seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); + seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> + MEMSTAT_VID_SHIFT); + seq_printf(m, "Current P-state: %d\n", + (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + } else if (IS_GEN6(dev)) { + u32 gt_perf_status = 
I915_READ(GEN6_GT_PERF_STATUS); + u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + int max_freq; + + /* RPSTAT1 is in the GT power well */ + __gen6_force_wake_get(dev_priv); + + seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); + seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); + seq_printf(m, "Render p-state ratio: %d\n", + (gt_perf_status & 0xff00) >> 8); + seq_printf(m, "Render p-state VID: %d\n", + gt_perf_status & 0xff); + seq_printf(m, "Render p-state limit: %d\n", + rp_state_limits & 0xff); + + max_freq = (rp_state_cap & 0xff0000) >> 16; + seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", + max_freq * 100); + + max_freq = (rp_state_cap & 0xff00) >> 8; + seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", + max_freq * 100); + + max_freq = rp_state_cap & 0xff; + seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", + max_freq * 100); + + __gen6_force_wake_put(dev_priv); + } else { + seq_printf(m, "no P-state info available\n"); + } return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 53dfc8398a96..2a653cc80395 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1264,6 +1264,7 @@ extern void intel_disable_fbc(struct drm_device *dev); extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern bool intel_fbc_enabled(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch (struct drm_device *dev); extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index adf983f01dda..0dadc025b77b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -397,11 +397,49 @@ static void notify_ring(struct drm_device *dev, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } +static void gen6_pm_irq_handler(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u8 new_delay = dev_priv->cur_delay; + u32 pm_iir; + + pm_iir = I915_READ(GEN6_PMIIR); + if (!pm_iir) + return; + + if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { + if (dev_priv->cur_delay != dev_priv->max_delay) + new_delay = dev_priv->cur_delay + 1; + if (new_delay > dev_priv->max_delay) + new_delay = dev_priv->max_delay; + } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { + if (dev_priv->cur_delay != dev_priv->min_delay) + new_delay = dev_priv->cur_delay - 1; + if (new_delay < dev_priv->min_delay) { + new_delay = dev_priv->min_delay; + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + I915_READ(GEN6_RP_INTERRUPT_LIMITS) | + ((new_delay << 16) & 0x3f0000)); + } else { + /* Make sure we continue to get down interrupts + * until we hit the minimum frequency */ + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); + } + + } + + gen6_set_rps(dev, new_delay); + dev_priv->cur_delay = new_delay; + + I915_WRITE(GEN6_PMIIR, pm_iir); +} + static irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pch_iir; + u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 hotplug_mask; struct drm_i915_master_private *master_priv; u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; @@ -417,8 +455,10 @@ static irqreturn_t 
ironlake_irq_handler(struct drm_device *dev) de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); pch_iir = I915_READ(SDEIIR); + pm_iir = I915_READ(GEN6_PMIIR); - if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) + if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && + (!IS_GEN6(dev) || pm_iir == 0)) goto done; if (HAS_PCH_CPT(dev)) @@ -470,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) i915_handle_rps_change(dev); } + if (IS_GEN6(dev)) + gen6_pm_irq_handler(dev); + /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); I915_WRITE(GTIIR, gt_iir); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c2231f7d2d97..d60860ec8cf4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1188,6 +1188,10 @@ #define DDRMPLL1 0X12c20 #define PEG_BAND_GAP_DATA 0x14d68 +#define GEN6_GT_PERF_STATUS 0x145948 +#define GEN6_RP_STATE_LIMITS 0x145994 +#define GEN6_RP_STATE_CAP 0x145998 + /* * Logical Context regs */ @@ -3169,7 +3173,7 @@ #define FORCEWAKE 0xA18C #define FORCEWAKE_ACK 0x130090 -#define GEN6_RC_NORMAL_FREQ 0xA008 +#define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) #define GEN6_OFFSET(x) ((x)<<19) @@ -3185,6 +3189,7 @@ #define GEN6_RC_CTL_HW_ENABLE (1<<31) #define GEN6_RP_DOWN_TIMEOUT 0xA010 #define GEN6_RP_INTERRUPT_LIMITS 0xA014 +#define GEN6_RPSTAT1 0xA01C #define GEN6_RP_CONTROL 0xA024 #define GEN6_RP_MEDIA_TURBO (1<<11) #define GEN6_RP_USE_NORMAL_FREQ (1<<9) @@ -3208,6 +3213,7 @@ #define GEN6_RC6_THRESHOLD 0xA0B8 #define GEN6_RC6p_THRESHOLD 0xA0BC #define GEN6_RC6pp_THRESHOLD 0xA0C0 +#define GEN6_PMINTRMSK 0xA168 #define GEN6_PMISR 0x44020 #define GEN6_PMIMR 0x44024 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a311809f3c80..f623efdb1151 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -817,8 +817,10 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveIMR = I915_READ(IMR); } - if (HAS_PCH_SPLIT(dev)) + if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); + if (IS_GEN6(dev)) + gen6_disable_rps(dev); intel_disable_clock_gating(dev); @@ -867,11 +869,14 @@ int i915_restore_state(struct drm_device *dev) /* Clock gating state */ intel_enable_clock_gating(dev); - if (HAS_PCH_SPLIT(dev)) { + if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); intel_init_emon(dev); } + if (IS_GEN6(dev)) + gen6_enable_rps(dev_priv); + /* Cache mode state */ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c79bee4b4d56..880659680d0a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6021,6 +6021,25 @@ void ironlake_disable_drps(struct drm_device *dev) } +void gen6_set_rps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 swreq; + + swreq = (val & 0x3ff) << 25; + I915_WRITE(GEN6_RPNSWREQ, swreq); +} + +void gen6_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0); + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); +} + static unsigned long intel_pxfreq(u32 vidfreq) { unsigned long freq; @@ -6107,7 +6126,7 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } 
-static void gen6_enable_rc6(struct drm_i915_private *dev_priv) +void gen6_enable_rps(struct drm_i915_private *dev_priv) { int i; @@ -6120,7 +6139,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_STATE, 0); __gen6_force_wake_get(dev_priv); - /* disable the counters and set determistic thresholds */ + /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); @@ -6144,7 +6163,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) GEN6_RC_CTL_EI_MODE(1) | GEN6_RC_CTL_HW_ENABLE); - I915_WRITE(GEN6_RC_NORMAL_FREQ, + I915_WRITE(GEN6_RPNSWREQ, GEN6_FREQUENCY(10) | GEN6_OFFSET(0) | GEN6_AGGRESSIVE_TURBO); @@ -6189,6 +6208,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_EI_EXPIRED); + I915_WRITE(GEN6_PMIMR, 0); + /* enable all PM interrupts */ + I915_WRITE(GEN6_PMINTRMSK, 0); __gen6_force_wake_put(dev_priv); } @@ -6381,9 +6403,6 @@ void intel_enable_clock_gating(struct drm_device *dev) I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); } } - - if (IS_GEN6(dev)) - gen6_enable_rc6(dev_priv); } void intel_disable_clock_gating(struct drm_device *dev) @@ -6657,6 +6676,9 @@ void intel_modeset_init(struct drm_device *dev) intel_init_emon(dev); } + if (IS_GEN6(dev)) + gen6_enable_rps(dev_priv); + INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); @@ -6690,6 +6712,8 @@ void intel_modeset_cleanup(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); + if (IS_GEN6(dev)) + gen6_disable_rps(dev); intel_disable_clock_gating(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index acdea6549ec4..d782ad9fd6db 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -298,6 +298,8 @@ extern void intel_enable_clock_gating(struct drm_device *dev); extern void intel_disable_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); +extern void gen6_enable_rps(struct drm_i915_private *dev_priv); +extern void gen6_disable_rps(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, -- cgit v1.2.3
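As a closing illustration of the p-state stepping policy that gen6_pm_irq_handler() above implements, here is a standalone sketch with the register bookkeeping stripped out. rps_new_delay() is a hypothetical helper written for this note, not part of the patch; the GEN6_PM_* bits are the ones the handler actually tests. The policy: step one frequency bin up on an up-threshold interrupt, one bin down on a down-threshold or down-timeout interrupt, clamped to the driver's min_delay/max_delay limits.

	/* Hypothetical helper, for illustration only: mirrors the
	 * decision in gen6_pm_irq_handler() minus the
	 * GEN6_RP_INTERRUPT_LIMITS writes and the final
	 * gen6_set_rps() call.
	 */
	static u8 rps_new_delay(u8 cur, u8 min, u8 max, u32 pm_iir)
	{
		u8 new_delay = cur;

		if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
			/* GPU busy: request the next higher bin. */
			if (cur < max)
				new_delay = cur + 1;
		} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD |
				     GEN6_PM_RP_DOWN_TIMEOUT)) {
			/* GPU idle: request the next lower bin. */
			if (cur > min)
				new_delay = cur - 1;
		}

		return new_delay;
	}

Keeping the policy this small is what makes the interrupt-driven approach cheap: each PM interrupt moves the request by at most one bin, and the GEN6_RP_INTERRUPT_LIMITS update in the real handler ensures down interrupts keep firing until the minimum frequency is reached.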