Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 545
1 file changed, 255 insertions(+), 290 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5167d63010b9..5735915facc5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <linux/string_helpers.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
@@ -56,6 +57,8 @@
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+static void skl_sagv_disable(struct drm_i915_private *dev_priv);
+
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
@@ -418,8 +421,8 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
- enableddisabled(enable),
- enableddisabled(was_enabled));
+ str_enabled_disabled(enable),
+ str_enabled_disabled(was_enabled));
return was_enabled;
}
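
Note: the enableddisabled()/yesno() helpers being converted away from in this patch are replaced by the generic ones from the new <linux/string_helpers.h> include above. A minimal sketch of the replacements' behavior (the upstream helpers are equivalent one-line ternaries):

        static inline const char *str_enabled_disabled(bool v)
        {
                return v ? "enabled" : "disabled";
        }

        static inline const char *str_yes_no(bool v)
        {
                return v ? "yes" : "no";
        }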
@@ -3669,8 +3672,8 @@ intel_has_sagv(struct drm_i915_private *dev_priv)
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
-static void
-skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
+static u32
+intel_sagv_block_time(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 12) {
u32 val = 0;
@@ -3679,26 +3682,48 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
ret = snb_pcode_read(dev_priv,
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
- if (!ret) {
- dev_priv->sagv_block_time_us = val;
- return;
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n");
+ return 0;
}
- drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
+ return val;
} else if (DISPLAY_VER(dev_priv) == 11) {
- dev_priv->sagv_block_time_us = 10;
- return;
- } else if (DISPLAY_VER(dev_priv) == 10) {
- dev_priv->sagv_block_time_us = 20;
- return;
- } else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->sagv_block_time_us = 30;
- return;
+ return 10;
+ } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) {
+ return 30;
} else {
- MISSING_CASE(DISPLAY_VER(dev_priv));
+ return 0;
}
+}
+
+static void intel_sagv_init(struct drm_i915_private *i915)
+{
+ if (!intel_has_sagv(i915))
+ i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
- dev_priv->sagv_block_time_us = 0;
+ /*
+ * Probe to see if we have working SAGV control.
+ * For icl+ this was already determined by intel_bw_init_hw().
+ */
+ if (DISPLAY_VER(i915) < 11)
+ skl_sagv_disable(i915);
+
+ drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN);
+
+ i915->sagv_block_time_us = intel_sagv_block_time(i915);
+
+ drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us);
+
+ /* avoid overflow when adding with wm0 latency/etc. */
+ if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX,
+ "Excessive SAGV block time %u, ignoring\n",
+ i915->sagv_block_time_us))
+ i915->sagv_block_time_us = 0;
+
+ if (!intel_has_sagv(i915))
+ i915->sagv_block_time_us = 0;
}
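
Note: the per-platform SAGV block times are now selected in one helper that returns a value, instead of several branches writing dev_priv->sagv_block_time_us directly. A standalone sketch of the resulting selection logic, with the pcode read stubbed out (snb_pcode_read() needs real hardware):

        static unsigned int sagv_block_time_us(int display_ver, bool is_lp)
        {
                if (display_ver >= 12)
                        return 0;       /* the driver reads this from pcode */
                if (display_ver == 11)
                        return 10;
                if (display_ver == 9 && !is_lp)
                        return 30;
                return 0;               /* no known SAGV block time */
        }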
/*
@@ -3712,16 +3737,15 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
* - All planes can enable watermarks for latencies >= SAGV engine block time
* - We're not using an interlaced display configuration
*/
-static int
-intel_enable_sagv(struct drm_i915_private *dev_priv)
+static void skl_sagv_enable(struct drm_i915_private *dev_priv)
{
int ret;
if (!intel_has_sagv(dev_priv))
- return 0;
+ return;
if (dev_priv->sagv_status == I915_SAGV_ENABLED)
- return 0;
+ return;
drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
@@ -3736,26 +3760,24 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return 0;
+ return;
} else if (ret < 0) {
drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
- return ret;
+ return;
}
dev_priv->sagv_status = I915_SAGV_ENABLED;
- return 0;
}
-static int
-intel_disable_sagv(struct drm_i915_private *dev_priv)
+static void skl_sagv_disable(struct drm_i915_private *dev_priv)
{
int ret;
if (!intel_has_sagv(dev_priv))
- return 0;
+ return;
if (dev_priv->sagv_status == I915_SAGV_DISABLED)
- return 0;
+ return;
drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
@@ -3770,14 +3792,13 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return 0;
+ return;
} else if (ret < 0) {
drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
- return ret;
+ return;
}
dev_priv->sagv_status = I915_SAGV_DISABLED;
- return 0;
}
static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
@@ -3790,7 +3811,7 @@ static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
return;
if (!intel_can_enable_sagv(i915, new_bw_state))
- intel_disable_sagv(i915);
+ skl_sagv_disable(i915);
}
static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
@@ -3803,7 +3824,7 @@ static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
return;
if (intel_can_enable_sagv(i915, new_bw_state))
- intel_enable_sagv(i915);
+ skl_sagv_enable(i915);
}
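
Note: the pre/post split keeps SAGV off across any intermediate hardware state. The commit sequence is, roughly:

        skl_sagv_pre_plane_update(state);   /* disable if the new state can't take SAGV */
        /* ... planes and watermarks are reprogrammed here ... */
        skl_sagv_post_plane_update(state);  /* enable once the new state allows it */

so SAGV is never left enabled while a configuration that cannot tolerate it is live.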
static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
@@ -4325,46 +4346,31 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv)
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
{
- u32 val, val2;
- u32 fourcc = 0;
+ u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
-
- /* No DDB allocated for disabled planes */
- if (val & PLANE_CTL_ENABLE)
- fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK_SKL,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
-
- if (DISPLAY_VER(dev_priv) >= 11) {
- val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb_y, val);
- } else {
- val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb, val);
- if (fourcc &&
- drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
- swap(val, val2);
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return;
- skl_ddb_entry_init_from_hw(ddb_y, val);
- skl_ddb_entry_init_from_hw(ddb_uv, val2);
- }
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb_y, val);
}
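
Note: the meaning of the two DDB entries is swapped relative to the old code, so that the plain entry tracks the register that exists on every platform. The assumed mapping, read off the hunk above:

        PLANE_BUF_CFG(pipe, plane)      ->  ddb    (packed/CbCr "main" allocation)
        PLANE_NV12_BUF_CFG(pipe, plane) ->  ddb_y  (pre-icl planar Y allocation)

On DISPLAY_VER >= 11 each plane has a single allocation, so the function returns before touching ddb_y.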
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv)
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -4380,8 +4386,8 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
for_each_plane_id_on_crtc(crtc, plane_id)
skl_ddb_get_hw_plane_state(dev_priv, pipe,
plane_id,
- &ddb_y[plane_id],
- &ddb_uv[plane_id]);
+ &ddb[plane_id],
+ &ddb_y[plane_id]);
intel_display_power_put(dev_priv, power_domain, wakeref);
}
@@ -4913,17 +4919,6 @@ static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool
}
static bool
-use_min_ddb(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
-static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane)
{
@@ -4935,134 +4930,24 @@ use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
}
static u64
-skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width, height;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- if (plane->id == PLANE_CURSOR)
- return 0;
-
- /*
- * We calculate extra ddb based on ratio plane rate/total data rate
- * in case, in some cases we should not allocate extra ddb for the plane,
- * so do not count its data rate, if this is the case.
- */
- if (use_min_ddb(crtc_state, plane))
- return 0;
-
- if (color_plane == 1 &&
- !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- return 0;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
- height = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- /* UV plane does 1/2 pixel sub-sampling */
- if (color_plane == 1) {
- width /= 2;
- height /= 2;
- }
-
- return width * height * fb->format->cpp[color_plane];
-}
-
-static u64
-skl_get_total_relative_data_rate(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- u64 total_data_rate = 0;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
- int i;
-
- /* Calculate and cache data rate for each plane */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe)
- continue;
-
- plane_id = plane->id;
-
- /* packed/y */
- crtc_state->plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 0);
-
- /* uv-plane */
- crtc_state->uv_plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 1);
- }
+ u64 data_rate = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
- total_data_rate += crtc_state->plane_data_rate[plane_id];
- total_data_rate += crtc_state->uv_plane_data_rate[plane_id];
- }
-
- return total_data_rate;
-}
-
-static u64
-icl_get_total_relative_data_rate(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- u64 total_data_rate = 0;
- enum plane_id plane_id;
- int i;
-
- /* Calculate and cache data rate for each plane */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe)
+ if (plane_id == PLANE_CURSOR)
continue;
- plane_id = plane->id;
-
- if (!plane_state->planar_linked_plane) {
- crtc_state->plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 0);
- } else {
- enum plane_id y_plane_id;
-
- /*
- * The slave plane might not iterate in
- * intel_atomic_crtc_state_for_each_plane_state(),
- * and needs the master plane state which may be
- * NULL if we try get_new_plane_state(), so we
- * always calculate from the master.
- */
- if (plane_state->planar_slave)
- continue;
-
- /* Y plane rate is calculated on the slave */
- y_plane_id = plane_state->planar_linked_plane->id;
- crtc_state->plane_data_rate[y_plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 0);
+ data_rate += crtc_state->rel_data_rate[plane_id];
- crtc_state->plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 1);
- }
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->rel_data_rate_y[plane_id];
}
- for_each_plane_id_on_crtc(crtc, plane_id)
- total_data_rate += crtc_state->plane_data_rate[plane_id];
-
- return total_data_rate;
+ return data_rate;
}
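
Note: the relative data rates summed here are precomputed per plane elsewhere in this series, but follow the same width * height * cpp rule the deleted skl_plane_relative_data_rate() used. A worked example with hypothetical numbers: a visible 3840x2160 XRGB8888 plane contributes 3840 * 2160 * 4 = 33177600, while an NV12 plane on pre-icl hardware contributes a Y rate at full resolution with cpp = 1 plus a CbCr rate at half width and height with cpp = 2; that is why the loop above adds rel_data_rate_y only below display version 11.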
const struct skl_wm_level *
@@ -5103,18 +4988,18 @@ skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
* So this is actually safe to do.
*/
static void
-skl_check_wm_level(struct skl_wm_level *wm, u64 total)
+skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
{
- if (wm->min_ddb_alloc > total)
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
memset(wm, 0, sizeof(*wm));
}
static void
skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
- u64 total, u64 uv_total)
+ const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
{
- if (wm->min_ddb_alloc > total ||
- uv_wm->min_ddb_alloc > uv_total) {
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
+ uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
memset(wm, 0, sizeof(*wm));
memset(uv_wm, 0, sizeof(*uv_wm));
}
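
Note: with the cached per-plane totals gone, these checks size the allocation straight from the DDB entry. skl_ddb_entry_size() is assumed to be the obvious difference:

        static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
        {
                return entry->end - entry->start;
        }

so a watermark level whose min_ddb_alloc exceeds the plane's actual entry is simply zeroed out, i.e. disabled.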
@@ -5134,17 +5019,16 @@ static bool icl_need_wm1_wa(struct drm_i915_private *i915,
struct skl_plane_ddb_iter {
u64 data_rate;
- u16 total[I915_MAX_PLANES];
- u16 uv_total[I915_MAX_PLANES];
u16 start, size;
};
-static u16
+static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ struct skl_ddb_entry *ddb,
const struct skl_wm_level *wm,
u64 data_rate)
{
- u16 extra = 0;
+ u16 size, extra = 0;
if (data_rate) {
extra = min_t(u16, iter->size,
@@ -5154,7 +5038,15 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
iter->data_rate -= data_rate;
}
- return wm->min_ddb_alloc + extra;
+ /*
+ * Keep ddb entry of all disabled planes explicitly zeroed
+ * to avoid skl_ddb_add_affected_planes() adding them to
+ * the state when other planes change their allocations.
+ */
+ size = wm->min_ddb_alloc + extra;
+ if (size)
+ iter->start = skl_ddb_entry_init(ddb, iter->start,
+ iter->start + size);
}
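
Note: skl_allocate_plane_ddb() gives each plane its minimum allocation plus a share of the spare blocks proportional to its data rate. A worked example with hypothetical numbers: with iter->size = 100 spare blocks and a plane accounting for a quarter of iter->data_rate,

        extra = min_t(u16, iter->size,
                      DIV64_U64_ROUND_UP(iter->size * data_rate,
                                         iter->data_rate));

yields extra = 25; both running totals then shrink so later planes split what remains, and a plane whose resulting size is zero keeps a zeroed ddb entry, as the new comment explains.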
static int
@@ -5168,32 +5060,31 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
int num_active = hweight8(dbuf_state->active_pipes);
- struct skl_plane_ddb_iter iter = {};
+ struct skl_plane_ddb_iter iter;
enum plane_id plane_id;
+ u16 cursor_size;
u32 blocks;
int level;
/* Clear the partitioning for disabled planes. */
+ memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
- memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
if (!crtc_state->hw.active)
return 0;
- if (DISPLAY_VER(dev_priv) >= 11)
- iter.data_rate = icl_get_total_relative_data_rate(state, crtc);
- else
- iter.data_rate = skl_get_total_relative_data_rate(state, crtc);
-
+ iter.start = alloc->start;
iter.size = skl_ddb_entry_size(alloc);
if (iter.size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= iter.total[PLANE_CURSOR];
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR],
- alloc->end - iter.total[PLANE_CURSOR], alloc->end);
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
+
+ iter.data_rate = skl_total_relative_data_rate(crtc_state);
/*
* Find the highest watermark level for which we can satisfy the block
@@ -5206,7 +5097,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
drm_WARN_ON(&dev_priv->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
@@ -5243,47 +5137,29 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
- iter.total[plane_id] =
- skl_allocate_plane_ddb(&iter, &wm->wm[level],
- crtc_state->plane_data_rate[plane_id]);
-
- iter.uv_total[plane_id] =
- skl_allocate_plane_ddb(&iter, &wm->uv_wm[level],
- crtc_state->uv_plane_data_rate[plane_id]);
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
+ crtc_state->rel_data_rate_y[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ } else {
+ skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ }
}
drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
- /* Set the actual DDB start/end points for each plane */
- iter.start = alloc->start;
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *plane_alloc =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_ddb_entry *uv_plane_alloc =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- /* Gen11+ uses a separate plane for UV watermarks */
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 11 && iter.uv_total[plane_id]);
-
- /* Leave disabled planes at (0,0) */
- if (iter.total[plane_id])
- iter.start = skl_ddb_entry_init(plane_alloc, iter.start,
- iter.start + iter.total[plane_id]);
-
- if (iter.uv_total[plane_id])
- iter.start = skl_ddb_entry_init(uv_plane_alloc, iter.start,
- iter.start + iter.uv_total[plane_id]);
- }
-
/*
* When we calculated watermark values we didn't know how high
* of a level we'd actually be able to hit, so we just marked
@@ -5292,12 +5168,20 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
- iter.total[plane_id],
- iter.uv_total[plane_id]);
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id))
+ skl_check_nv12_wm_level(&wm->wm[level],
+ &wm->uv_wm[level],
+ ddb_y, ddb);
+ else
+ skl_check_wm_level(&wm->wm[level], ddb);
if (icl_need_wm1_wa(dev_priv, plane_id) &&
level == 1 && wm->wm[0].enable) {
@@ -5313,12 +5197,24 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* if it turns out we don't have enough DDB blocks for them.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- skl_check_wm_level(&wm->trans_wm, iter.total[plane_id]);
- skl_check_wm_level(&wm->sagv.wm0, iter.total[plane_id]);
- skl_check_wm_level(&wm->sagv.trans_wm, iter.total[plane_id]);
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_check_wm_level(&wm->trans_wm, ddb_y);
+ } else {
+ WARN_ON(skl_ddb_entry_size(ddb_y));
+
+ skl_check_wm_level(&wm->trans_wm, ddb);
+ }
+
+ skl_check_wm_level(&wm->sagv.wm0, ddb);
+ skl_check_wm_level(&wm->sagv.trans_wm, ddb);
}
return 0;
@@ -5408,6 +5304,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
}
wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_4_TILED ||
modifier == I915_FORMAT_MOD_Yf_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
@@ -5578,6 +5475,25 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ /*
+ * Let's have blocks at minimum equivalent to plane_blocks_per_line,
+ * as there will be at minimum one line for the lines configuration. This
+ * is a workaround for FIFO underruns observed with resolutions like
+ * 4k 60 Hz in single-channel DRAM configurations.
+ *
+ * As per the Bspec 49325, if the ddb allocation can hold at least
+ * one plane_blocks_per_line, we should have selected method2 in
+ * the above logic. Assuming that modern versions have enough dbuf
+ * and method2 guarantees blocks equivalent to at least 1 line,
+ * select the blocks as plane_blocks_per_line.
+ *
+ * TODO: Revisit the logic when we have better understanding on DRAM
+ * channels' impact on the level 0 memory latency and the relevant
+ * wm calculations.
+ */
+ if (skl_wm_has_lines(dev_priv, level))
+ blocks = max(blocks,
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
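
Note: a numeric illustration of the clamp above, with hypothetical values: if wp->plane_blocks_per_line is 38.4 in fixed point (rounding up to 39) while selected_result rounds to only 12 blocks, the level is programmed with 39 blocks, matching what Bspec 49325's method2 would have produced when the DDB can hold at least one line.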
@@ -5926,7 +5842,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= PLANE_WM_EN;
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
- val |= level->blocks;
+ val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
intel_de_write_fw(dev_priv, reg, val);
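
Note: using REG_FIELD_PREP() here (and REG_FIELD_GET() in the readout change further down) makes the field placement explicit instead of relying on the blocks field starting at bit 0. Assuming PLANE_WM_BLOCKS_MASK covers bits 11:0, the pair behaves like the generic FIELD_PREP()/FIELD_GET():

        val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);  /* shift + mask into the field */
        blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);           /* extract and shift back down */

and it also masks an out-of-range value rather than letting it bleed into the adjacent PLANE_WM_LINES_MASK bits.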
@@ -5940,11 +5856,10 @@ void skl_write_plane_wm(struct intel_plane *plane,
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_ddb_entry *ddb_uv =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5954,25 +5869,20 @@ void skl_write_plane_wm(struct intel_plane *plane,
skl_plane_trans_wm(pipe_wm, plane_id));
if (HAS_HW_SAGV_WM(dev_priv)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
&wm->sagv.wm0);
skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
&wm->sagv.trans_wm);
}
- if (DISPLAY_VER(dev_priv) >= 11) {
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id), ddb_y);
- return;
- }
-
- if (wm->is_planar)
- swap(ddb_y, ddb_uv);
-
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id), ddb_y);
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
+ PLANE_BUF_CFG(pipe, plane_id), ddb);
+
+ if (DISPLAY_VER(dev_priv) < 11)
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
}
void skl_write_cursor_wm(struct intel_plane *plane,
@@ -5984,7 +5894,7 @@ void skl_write_cursor_wm(struct intel_plane *plane,
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ &crtc_state->wm.skl.plane_ddb[plane_id];
for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -6081,10 +5991,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
- &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
- skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
- &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
continue;
plane_state = intel_atomic_get_plane_state(state, plane);
@@ -6147,7 +6057,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (IS_ALDERLAKE_P(dev_priv))
+ if (HAS_MBUS_JOINING(dev_priv))
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
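
Note: assuming the feature macro introduced alongside this work expands to the old check, e.g.

        #define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))

the three IS_ALDERLAKE_P() -> HAS_MBUS_JOINING() conversions in this patch are behavior-preserving and simply centralize the platform check for future platforms that support MBUS joining.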
@@ -6186,8 +6096,8 @@ skl_compute_ddb(struct intel_atomic_state *state)
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
INTEL_INFO(dev_priv)->dbuf.slice_mask,
- yesno(old_dbuf_state->joined_mbus),
- yesno(new_dbuf_state->joined_mbus));
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus));
}
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -6253,8 +6163,8 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
- new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
@@ -6574,7 +6484,7 @@ static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
- level->blocks = val & PLANE_WM_BLOCKS_MASK;
+ level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}
@@ -6639,7 +6549,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (HAS_MBUS_JOINING(dev_priv))
dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -6656,16 +6566,16 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_ddb_entry *ddb_uv =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
- plane_id, ddb_y, ddb_uv);
+ plane_id, ddb, ddb_y);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
}
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
@@ -6689,7 +6599,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
- yesno(dbuf_state->joined_mbus));
+ str_yes_no(dbuf_state->joined_mbus));
}
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
@@ -7000,7 +6910,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
- yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
+ str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
+ str_yes_no(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -7578,6 +7489,9 @@ static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
/* Wa_22011091694:adlp */
intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
+
+ /* Bspec/49189 Initialize Sequence */
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -8175,8 +8089,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
else if (GRAPHICS_VER(dev_priv) == 5)
ilk_get_mem_freq(dev_priv);
- if (intel_has_sagv(dev_priv))
- skl_setup_sagv_block_time(dev_priv);
+ intel_sagv_init(dev_priv);
/* For FIFO watermark updates */
if (DISPLAY_VER(dev_priv) >= 9) {
@@ -8301,7 +8214,7 @@ static void update_mbus_pre_enable(struct intel_atomic_state *state)
const struct intel_dbuf_state *dbuf_state =
intel_atomic_get_new_dbuf_state(state);
- if (!IS_ALDERLAKE_P(dev_priv))
+ if (!HAS_MBUS_JOINING(dev_priv))
return;
/*
@@ -8367,3 +8280,55 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(dev_priv,
new_dbuf_state->enabled_slices);
}
+
+void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc *crtc;
+ u32 val = 0;
+ int i;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
+ val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
+ }
+
+ /* Wa_22010947358:adl-p */
+ if (IS_ALDERLAKE_P(i915))
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
+ MBUS_DBOX_A_CREDIT(4);
+ else
+ val |= MBUS_DBOX_A_CREDIT(2);
+
+ if (IS_ALDERLAKE_P(i915)) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active ||
+ !intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val);
+ }
+}
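
Note: the credit values programmed above, summarized from the code (not from Bspec):

        adl-p:           A = 6 (joined MBUS) or 4,  B = 8,   BW = 2,  plus B2B regulation
        other ver 12+:   A = 2,                     B = 12,  BW = 2,  plus B2B regulation
        ver 11:          A = 2,                     B = 8,   BW = 1

The register write is skipped entirely unless joined_mbus or active_pipes actually changed, and only pipes that are active and undergoing a full modeset are written.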