147 files changed, 15242 insertions(+), 7139 deletions(-)
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 38dab76ae69e..be71be95b706 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -1823,7 +1823,7 @@ static void drm_dp_destroy_port(struct kref *kref) return; } - kfree(port->cached_edid); + drm_edid_free(port->cached_edid); /* * we can't destroy the connector here, as we might be holding the @@ -2272,8 +2272,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, if (port->pdt != DP_PEER_DEVICE_NONE && drm_dp_mst_is_end_device(port->pdt, port->mcs) && port->port_num >= DP_MST_LOGICAL_PORT_0) - port->cached_edid = drm_get_edid(port->connector, - &port->aux.ddc); + port->cached_edid = drm_edid_read_ddc(port->connector, + &port->aux.ddc); drm_connector_register(port->connector); return; @@ -4133,7 +4133,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector, ret = connector_status_connected; /* for logical ports - cache the EDID */ if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid) - port->cached_edid = drm_get_edid(connector, &port->aux.ddc); + port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc); break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) @@ -4147,7 +4147,7 @@ out: EXPORT_SYMBOL(drm_dp_mst_detect_port); /** - * drm_dp_mst_get_edid() - get EDID for an MST port + * drm_dp_mst_edid_read() - get EDID for an MST port * @connector: toplevel connector to get EDID for * @mgr: manager for this port * @port: unverified pointer to a port. @@ -4156,9 +4156,11 @@ EXPORT_SYMBOL(drm_dp_mst_detect_port); * It validates the pointer still exists so the caller doesn't require a * reference. */ -struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { - struct edid *edid = NULL; + const struct drm_edid *drm_edid; /* we need to search for the port in the mgr in case it's gone */ port = drm_dp_mst_topology_get_port_validated(mgr, port); @@ -4166,12 +4168,41 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ return NULL; if (port->cached_edid) - edid = drm_edid_duplicate(port->cached_edid); - else { - edid = drm_get_edid(connector, &port->aux.ddc); - } - port->has_audio = drm_detect_monitor_audio(edid); + drm_edid = drm_edid_dup(port->cached_edid); + else + drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc); + drm_dp_mst_topology_put_port(port); + + return drm_edid; +} +EXPORT_SYMBOL(drm_dp_mst_edid_read); + +/** + * drm_dp_mst_get_edid() - get EDID for an MST port + * @connector: toplevel connector to get EDID for + * @mgr: manager for this port + * @port: unverified pointer to a port. + * + * This function is deprecated; please use drm_dp_mst_edid_read() instead. + * + * This returns an EDID for the port connected to a connector, + * It validates the pointer still exists so the caller doesn't require a + * reference. 
+ */ +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + const struct drm_edid *drm_edid; + struct edid *edid; + + drm_edid = drm_dp_mst_edid_read(connector, mgr, port); + + edid = drm_edid_duplicate(drm_edid_raw(drm_edid)); + + drm_edid_free(drm_edid); + return edid; } EXPORT_SYMBOL(drm_dp_mst_get_edid); diff --git a/drivers/gpu/drm/display/drm_dsc_helper.c b/drivers/gpu/drm/display/drm_dsc_helper.c index c869c6e51e2b..fc187a8d8873 100644 --- a/drivers/gpu/drm/display/drm_dsc_helper.c +++ b/drivers/gpu/drm/display/drm_dsc_helper.c @@ -270,6 +270,1012 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, } EXPORT_SYMBOL(drm_dsc_pps_payload_pack); +/* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */ +static const u16 drm_dsc_rc_buf_thresh[] = { + 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616, + 7744, 7872, 8000, 8064 +}; + +/** + * drm_dsc_set_rc_buf_thresh() - Set thresholds for the RC model + * in accordance with the DSC 1.2 specification. + * + * @vdsc_cfg: DSC Configuration data partially filled by driver + */ +void drm_dsc_set_rc_buf_thresh(struct drm_dsc_config *vdsc_cfg) +{ + int i; + + BUILD_BUG_ON(ARRAY_SIZE(drm_dsc_rc_buf_thresh) != + DSC_NUM_BUF_RANGES - 1); + BUILD_BUG_ON(ARRAY_SIZE(drm_dsc_rc_buf_thresh) != + ARRAY_SIZE(vdsc_cfg->rc_buf_thresh)); + + for (i = 0; i < ARRAY_SIZE(drm_dsc_rc_buf_thresh); i++) + vdsc_cfg->rc_buf_thresh[i] = drm_dsc_rc_buf_thresh[i] >> 6; + + /* + * For 6bpp, RC Buffer threshold 12 and 13 need a different value + * as per C Model + */ + if (vdsc_cfg->bits_per_pixel == 6 << 4) { + vdsc_cfg->rc_buf_thresh[12] = 7936 >> 6; + vdsc_cfg->rc_buf_thresh[13] = 8000 >> 6; + } +} +EXPORT_SYMBOL(drm_dsc_set_rc_buf_thresh); + +struct rc_parameters { + u16 initial_xmit_delay; + u8 first_line_bpg_offset; + u16 initial_offset; + u8 flatness_min_qp; + u8 flatness_max_qp; + u8 rc_quant_incr_limit0; + u8 rc_quant_incr_limit1; + struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; +}; + +struct rc_parameters_data { + u8 bpp; + u8 bpc; + struct rc_parameters params; +}; + +#define DSC_BPP(bpp) ((bpp) << 4) + +/* + * Rate Control Related Parameter Recommended Values from DSC_v1.1 spec prior + * to DSC 1.1 fractional bpp underflow SCR (DSC_v1.1_E1.pdf) + * + * Cross-checked against C Model releases: DSC_model_20161212 and 20210623 + */ +static const struct rc_parameters_data rc_parameters_pre_scr[] = { + { + .bpp = DSC_BPP(6), .bpc = 8, + { 683, 15, 6144, 3, 13, 11, 11, { + { 0, 2, 0 }, { 1, 4, -2 }, { 3, 6, -2 }, { 4, 6, -4 }, + { 5, 7, -6 }, { 5, 7, -6 }, { 6, 7, -6 }, { 6, 8, -8 }, + { 7, 9, -8 }, { 8, 10, -10 }, { 9, 11, -10 }, { 10, 12, -12 }, + { 10, 13, -12 }, { 12, 14, -12 }, { 15, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 8, + { 512, 12, 6144, 3, 12, 11, 11, { + { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 }, + { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 10, + { 512, 12, 6144, 7, 16, 15, 15, { + /* + * DSC model/pre-SCR-cfg has 8 for range_max_qp[0], however + * VESA DSC 1.1 Table E-5 sets it to 4. 
+ */ + { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, + { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 12, + { 512, 12, 6144, 11, 20, 19, 19, { + { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, + { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, + { 21, 23, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 8, + { 410, 12, 5632, 3, 12, 11, 11, { + { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 11, -10 }, + { 5, 12, -12 }, { 7, 13, -12 }, { 13, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 10, + { 410, 12, 5632, 7, 16, 15, 15, { + { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 15, -10 }, + { 9, 16, -12 }, { 11, 17, -12 }, { 17, 19, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 12, + { 410, 12, 5632, 11, 20, 19, 19, { + { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 }, + { 13, 19, -10 }, { 13, 20, -12 }, { 15, 21, -12 }, + { 21, 23, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 8, + { 341, 15, 2048, 3, 12, 11, 11, { + { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, + { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 10, + { 341, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 }, + { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, + { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 12, + { 341, 15, 2048, 11, 20, 19, 19, { + { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 }, + { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, + { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, + { 21, 23, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), .bpc = 8, + { 273, 15, 2048, 3, 12, 11, 11, { + { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 }, + { 1, 2, 2 }, { 1, 3, 0 }, { 1, 4, -2 }, { 2, 4, -4 }, + { 3, 4, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 5, 7, -10 }, + { 5, 8, -12 }, { 7, 13, -12 }, { 13, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), .bpc = 10, + { 273, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 }, + { 5, 6, 2 }, { 5, 7, 0 }, { 5, 8, -2 }, { 6, 8, -4 }, + { 7, 8, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 9, 11, -10 }, + { 9, 12, -12 }, { 11, 17, -12 }, { 17, 19, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), .bpc = 12, + { 273, 15, 2048, 11, 20, 19, 19, { + { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 }, + { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 }, + { 11, 12, -6 }, { 11, 13, -8 }, { 12, 14, -10 }, + { 13, 15, -10 }, { 13, 16, -12 }, { 15, 21, -12 }, + { 21, 23, -12 } + } + } + }, + { /* sentinel */ } +}; + +/* + * Selected Rate Control Related Parameter Recommended Values 
from DSC v1.2, v1.2a, v1.2b and + * DSC_v1.1_E1 specs. + * + * Cross-checked against C Model releases: DSC_model_20161212 and 20210623 + */ +static const struct rc_parameters_data rc_parameters_1_2_444[] = { + { + .bpp = DSC_BPP(6), .bpc = 8, + { 768, 15, 6144, 3, 13, 11, 11, { + { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 }, + { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 }, + { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 }, + { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 10, + { 768, 15, 6144, 7, 17, 15, 15, { + { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 }, + { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 }, + { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 }, + { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 }, + { 17, 18, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 12, + { 768, 15, 6144, 11, 21, 19, 19, { + { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 }, + { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 }, + { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 }, + { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 }, + { 21, 22, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 14, + { 768, 15, 6144, 15, 25, 23, 23, { + { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 }, + { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 }, + { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 }, + { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 }, + { 25, 26, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 16, + { 768, 15, 6144, 19, 29, 27, 27, { + { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 }, + { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 }, + { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 }, + { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 }, + { 29, 30, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 8, + { 512, 12, 6144, 3, 12, 11, 11, { + { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 10, -10 }, { 5, 10, -10 }, { 5, 11, -12 }, + { 5, 11, -12 }, { 9, 12, -12 }, { 12, 13, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 10, + { 512, 12, 6144, 7, 16, 15, 15, { + { 0, 8, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 14, -10 }, { 9, 14, -10 }, { 9, 15, -12 }, + { 9, 15, -12 }, { 13, 16, -12 }, { 16, 17, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 12, + { 512, 12, 6144, 11, 20, 19, 19, { + { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 18, -10 }, { 13, 18, -10 }, + { 13, 19, -12 }, { 13, 19, -12 }, { 17, 20, -12 }, + { 20, 21, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 14, + { 512, 12, 6144, 15, 24, 23, 23, { + { 0, 12, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 }, + { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 }, + { 24, 25, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 16, + { 512, 12, 6144, 19, 28, 27, 27, { + { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 }, + { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 }, + { 28, 29, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 8, + { 410, 
15, 5632, 3, 12, 11, 11, { + { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 }, + { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 10, + { 410, 15, 5632, 7, 16, 15, 15, { + { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 }, + { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 12, + { 410, 15, 5632, 11, 20, 19, 19, { + { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 }, + { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 }, + { 19, 20, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 14, + { 410, 15, 5632, 15, 24, 23, 23, { + { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 }, + { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 }, + { 23, 24, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 16, + { 410, 15, 5632, 19, 28, 27, 27, { + { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 }, + { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 }, + { 27, 28, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 8, + { 341, 15, 2048, 3, 12, 11, 11, { + { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 8, -8 }, { 3, 9, -10 }, { 5, 9, -10 }, { 5, 9, -12 }, + { 5, 9, -12 }, { 7, 10, -12 }, { 10, 11, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 10, + { 341, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 }, + { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 12, -8 }, { 7, 13, -10 }, { 9, 13, -10 }, { 9, 13, -12 }, + { 9, 13, -12 }, { 11, 14, -12 }, { 14, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 12, + { 341, 15, 2048, 11, 20, 19, 19, { + { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 }, + { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 16, -8 }, { 11, 17, -10 }, { 13, 17, -10 }, + { 13, 17, -12 }, { 13, 17, -12 }, { 15, 18, -12 }, + { 18, 19, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 14, + { 341, 15, 2048, 15, 24, 23, 23, { + { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 }, + { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 }, + { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 }, + { 22, 23, -12 } + } + } + }, + { + .bpp = DSC_BPP(12), .bpc = 16, + { 341, 15, 2048, 19, 28, 27, 27, { + { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 }, + { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 }, + { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 }, + { 26, 27, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), .bpc = 8, + { 273, 15, 2048, 3, 12, 11, 11, { + { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 }, + { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 }, + { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 }, + { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), 
.bpc = 10, + { 273, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 }, + { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 }, + { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 }, + { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), .bpc = 12, + { 273, 15, 2048, 11, 20, 19, 19, { + { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 }, + { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 }, + { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 }, + { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 }, + { 16, 17, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), .bpc = 14, + { 273, 15, 2048, 15, 24, 23, 23, { + { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 }, + { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 }, + { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 }, + { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 }, + { 20, 21, -12 } + } + } + }, + { + .bpp = DSC_BPP(15), .bpc = 16, + { 273, 15, 2048, 19, 28, 27, 27, { + { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 }, + { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 }, + { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 }, + { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 }, + { 24, 25, -12 } + } + } + }, + { /* sentinel */ } +}; + +/* + * Selected Rate Control Related Parameter Recommended Values for 4:2:2 from + * DSC v1.2, v1.2a, v1.2b + * + * Cross-checked against C Model releases: DSC_model_20161212 and 20210623 + */ +static const struct rc_parameters_data rc_parameters_1_2_422[] = { + { + .bpp = DSC_BPP(6), .bpc = 8, + { 512, 15, 6144, 3, 12, 11, 11, { + { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 10, -10 }, { 5, 10, -10 }, { 5, 11, -12 }, + { 5, 11, -12 }, { 9, 12, -12 }, { 12, 13, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 10, + { 512, 15, 6144, 7, 16, 15, 15, { + { 0, 8, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 14, -10 }, { 9, 14, -10 }, { 9, 15, -12 }, + { 9, 15, -12 }, { 13, 16, -12 }, { 16, 17, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 12, + { 512, 15, 6144, 11, 20, 19, 19, { + { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 18, -10 }, { 13, 18, -10 }, + { 13, 19, -12 }, { 13, 19, -12 }, { 17, 20, -12 }, + { 20, 21, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 14, + { 512, 15, 6144, 15, 24, 23, 23, { + { 0, 12, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 }, + { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 }, + { 24, 25, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 16, + { 512, 15, 6144, 19, 28, 27, 27, { + { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 }, + { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 }, + { 28, 29, -12 } + } + } + }, + { + .bpp = DSC_BPP(7), .bpc = 8, + { 410, 15, 5632, 3, 12, 11, 11, { + { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 }, + { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 } + } + } + }, + { + .bpp = DSC_BPP(7), .bpc = 10, + { 410, 15, 
5632, 7, 16, 15, 15, { + { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 }, + { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 } + } + } + }, + { + .bpp = DSC_BPP(7), .bpc = 12, + { 410, 15, 5632, 11, 20, 19, 19, { + { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 }, + { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 }, + { 19, 20, -12 } + } + } + }, + { + .bpp = DSC_BPP(7), .bpc = 14, + { 410, 15, 5632, 15, 24, 23, 23, { + { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 }, + { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 }, + { 23, 24, -12 } + } + } + }, + { + .bpp = DSC_BPP(7), .bpc = 16, + { 410, 15, 5632, 19, 28, 27, 27, { + { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 }, + { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 }, + { 27, 28, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 8, + { 341, 15, 2048, 3, 12, 11, 11, { + { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 8, -8 }, { 3, 9, -10 }, { 5, 9, -10 }, { 5, 9, -12 }, + { 5, 9, -12 }, { 7, 10, -12 }, { 10, 11, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 10, + { 341, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 }, + { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 12, -8 }, { 7, 13, -10 }, { 9, 13, -10 }, { 9, 13, -12 }, + { 9, 13, -12 }, { 11, 14, -12 }, { 14, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 12, + { 341, 15, 2048, 11, 20, 19, 19, { + { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 }, + { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 16, -8 }, { 11, 17, -10 }, { 13, 17, -10 }, + { 13, 17, -12 }, { 13, 17, -12 }, { 15, 18, -12 }, + { 18, 19, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 14, + { 341, 15, 2048, 15, 24, 23, 23, { + { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 }, + { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 }, + { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 }, + { 22, 23, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 16, + { 341, 15, 2048, 19, 28, 27, 27, { + { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 }, + { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 }, + { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 }, + { 26, 27, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 8, + { 273, 15, 2048, 3, 12, 11, 11, { + { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 }, + { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 }, + { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 }, + { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 10, + { 273, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 }, + { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 }, + { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 }, + { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 12, + { 
273, 15, 2048, 11, 20, 19, 19, { + { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 }, + { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 }, + { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 }, + { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 }, + { 16, 17, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 14, + { 273, 15, 2048, 15, 24, 23, 23, { + { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 }, + { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 }, + { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 }, + { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 }, + { 20, 21, -12 } + } + } + }, + { + .bpp = DSC_BPP(10), .bpc = 16, + { 273, 15, 2048, 19, 28, 27, 27, { + { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 }, + { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 }, + { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 }, + { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 }, + { 24, 25, -12 } + } + } + }, + { /* sentinel */ } +}; + +/* + * Selected Rate Control Related Parameter Recommended Values for 4:2:2 from + * DSC v1.2, v1.2a, v1.2b + * + * Cross-checked against C Model releases: DSC_model_20161212 and 20210623 + */ +static const struct rc_parameters_data rc_parameters_1_2_420[] = { + { + .bpp = DSC_BPP(4), .bpc = 8, + { 512, 12, 6144, 3, 12, 11, 11, { + { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 10, -10 }, { 5, 10, -10 }, { 5, 11, -12 }, + { 5, 11, -12 }, { 9, 12, -12 }, { 12, 13, -12 } + } + } + }, + { + .bpp = DSC_BPP(4), .bpc = 10, + { 512, 12, 6144, 7, 16, 15, 15, { + { 0, 8, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 14, -10 }, { 9, 14, -10 }, { 9, 15, -12 }, + { 9, 15, -12 }, { 13, 16, -12 }, { 16, 17, -12 } + } + } + }, + { + .bpp = DSC_BPP(4), .bpc = 12, + { 512, 12, 6144, 11, 20, 19, 19, { + { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 18, -10 }, { 13, 18, -10 }, + { 13, 19, -12 }, { 13, 19, -12 }, { 17, 20, -12 }, + { 20, 21, -12 } + } + } + }, + { + .bpp = DSC_BPP(4), .bpc = 14, + { 512, 12, 6144, 15, 24, 23, 23, { + { 0, 12, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 }, + { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 }, + { 24, 25, -12 } + } + } + }, + { + .bpp = DSC_BPP(4), .bpc = 16, + { 512, 12, 6144, 19, 28, 27, 27, { + { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 }, + { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 }, + { 28, 29, -12 } + } + } + }, + { + .bpp = DSC_BPP(5), .bpc = 8, + { 410, 15, 5632, 3, 12, 11, 11, { + { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 }, + { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 } + } + } + }, + { + .bpp = DSC_BPP(5), .bpc = 10, + { 410, 15, 5632, 7, 16, 15, 15, { + { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 }, + { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 } + } + } + }, + { + .bpp = DSC_BPP(5), .bpc = 12, + { 410, 15, 5632, 
11, 20, 19, 19, { + { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 }, + { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 }, + { 19, 20, -12 } + } + } + }, + { + .bpp = DSC_BPP(5), .bpc = 14, + { 410, 15, 5632, 15, 24, 23, 23, { + { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 }, + { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 }, + { 23, 24, -12 } + } + } + }, + { + .bpp = DSC_BPP(5), .bpc = 16, + { 410, 15, 5632, 19, 28, 27, 27, { + { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 }, + { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 }, + { 27, 28, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 8, + { 341, 15, 2048, 3, 12, 11, 11, { + { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 8, -8 }, { 3, 9, -10 }, { 5, 9, -10 }, { 5, 9, -12 }, + { 5, 9, -12 }, { 7, 10, -12 }, { 10, 12, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 10, + { 341, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 }, + { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 12, -8 }, { 7, 13, -10 }, { 9, 13, -10 }, { 9, 13, -12 }, + { 9, 13, -12 }, { 11, 14, -12 }, { 14, 15, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 12, + { 341, 15, 2048, 11, 20, 19, 19, { + { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 }, + { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 16, -8 }, { 11, 17, -10 }, { 13, 17, -10 }, + { 13, 17, -12 }, { 13, 17, -12 }, { 15, 18, -12 }, + { 18, 19, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 14, + { 341, 15, 2048, 15, 24, 23, 23, { + { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 }, + { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 }, + { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 }, + { 22, 23, -12 } + } + } + }, + { + .bpp = DSC_BPP(6), .bpc = 16, + { 341, 15, 2048, 19, 28, 27, 27, { + { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 }, + { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 }, + { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 }, + { 26, 27, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 8, + { 256, 15, 2048, 3, 12, 11, 11, { + { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 }, + { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 }, + { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 }, + { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 10, + { 256, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 }, + { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 }, + { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 }, + { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 12, + { 256, 15, 2048, 11, 20, 19, 19, { + { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 }, + { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 }, + { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 }, + { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 }, + { 16, 17, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 14, + { 
256, 15, 2048, 15, 24, 23, 23, { + { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 }, + { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 }, + { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 }, + { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 }, + { 20, 21, -12 } + } + } + }, + { + .bpp = DSC_BPP(8), .bpc = 16, + { 256, 15, 2048, 19, 28, 27, 27, { + { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 }, + { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 }, + { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 }, + { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 }, + { 24, 25, -12 } + } + } + }, + { /* sentinel */ } +}; + +static const struct rc_parameters *get_rc_params(const struct rc_parameters_data *rc_parameters, + u16 dsc_bpp, + u8 bits_per_component) +{ + int i; + + for (i = 0; rc_parameters[i].bpp; i++) + if (rc_parameters[i].bpp == dsc_bpp && + rc_parameters[i].bpc == bits_per_component) + return &rc_parameters[i].params; + + return NULL; +} + +/** + * drm_dsc_setup_rc_params() - Set parameters and limits for RC model in + * accordance with the DSC 1.1 or 1.2 specification and DSC C Model + * Required bits_per_pixel and bits_per_component to be set before calling this + * function. + * + * @vdsc_cfg: DSC Configuration data partially filled by driver + * @type: operating mode and standard to follow + * + * Return: 0 or -error code in case of an error + */ +int drm_dsc_setup_rc_params(struct drm_dsc_config *vdsc_cfg, enum drm_dsc_params_type type) +{ + const struct rc_parameters_data *data; + const struct rc_parameters *rc_params; + int i; + + if (WARN_ON_ONCE(!vdsc_cfg->bits_per_pixel || + !vdsc_cfg->bits_per_component)) + return -EINVAL; + + switch (type) { + case DRM_DSC_1_2_444: + data = rc_parameters_1_2_444; + break; + case DRM_DSC_1_1_PRE_SCR: + data = rc_parameters_pre_scr; + break; + case DRM_DSC_1_2_422: + data = rc_parameters_1_2_422; + break; + case DRM_DSC_1_2_420: + data = rc_parameters_1_2_420; + break; + default: + return -EINVAL; + } + + rc_params = get_rc_params(data, + vdsc_cfg->bits_per_pixel, + vdsc_cfg->bits_per_component); + if (!rc_params) + return -EINVAL; + + vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset; + vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay; + vdsc_cfg->initial_offset = rc_params->initial_offset; + vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp; + vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp; + vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0; + vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1; + + for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { + vdsc_cfg->rc_range_params[i].range_min_qp = + rc_params->rc_range_params[i].range_min_qp; + vdsc_cfg->rc_range_params[i].range_max_qp = + rc_params->rc_range_params[i].range_max_qp; + /* + * Range BPG Offset uses 2's complement and is only a 6 bits. So + * mask it to get only 6 bits. 
+ */ + vdsc_cfg->rc_range_params[i].range_bpg_offset = + rc_params->rc_range_params[i].range_bpg_offset & + DSC_RANGE_BPG_OFFSET_MASK; + } + + return 0; +} +EXPORT_SYMBOL(drm_dsc_setup_rc_params); + /** * drm_dsc_compute_rc_parameters() - Write rate control * parameters to the dsc configuration defined in diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 0454da505687..e0dbd9140726 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2845,6 +2845,35 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, EXPORT_SYMBOL(drm_get_edid_switcheroo); /** + * drm_edid_read_switcheroo - get EDID data for a vga_switcheroo output + * @connector: connector we're probing + * @adapter: I2C adapter to use for DDC + * + * Wrapper around drm_edid_read_ddc() for laptops with dual GPUs using one set + * of outputs. The wrapper adds the requisite vga_switcheroo calls to + * temporarily switch DDC to the GPU which is retrieving EDID. + * + * Return: Pointer to valid EDID or %NULL if we couldn't find any. + */ +const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct drm_device *dev = connector->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); + const struct drm_edid *drm_edid; + + if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev))) + return NULL; + + vga_switcheroo_lock_ddc(pdev); + drm_edid = drm_edid_read_ddc(connector, adapter); + vga_switcheroo_unlock_ddc(pdev); + + return drm_edid; +} +EXPORT_SYMBOL(drm_edid_read_switcheroo); + +/** * drm_edid_duplicate - duplicate an EDID and the extensions * @edid: EDID to duplicate * @@ -2852,6 +2881,9 @@ EXPORT_SYMBOL(drm_get_edid_switcheroo); */ struct edid *drm_edid_duplicate(const struct edid *edid) { + if (!edid) + return NULL; + return kmemdup(edid, edid_size(edid), GFP_KERNEL); } EXPORT_SYMBOL(drm_edid_duplicate); @@ -6243,6 +6275,9 @@ static void drm_parse_cea_ext(struct drm_connector *connector, info->color_formats |= DRM_COLOR_FORMAT_YCBCR444; if (edid_ext[3] & EDID_CEA_YCRCB422) info->color_formats |= DRM_COLOR_FORMAT_YCBCR422; + if (edid_ext[3] & EDID_BASIC_AUDIO) + info->has_audio = true; + } drm_edid_iter_end(&edid_iter); @@ -6268,6 +6303,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector, drm_parse_hdr_metadata_block(connector, data); else if (cea_db_tag(db) == CTA_DB_VIDEO) parse_cta_vdb(connector, db); + else if (cea_db_tag(db) == CTA_DB_AUDIO) + info->has_audio = true; } cea_db_iter_end(&iter); @@ -6424,6 +6461,7 @@ static void drm_reset_display_info(struct drm_connector *connector) info->max_tmds_clock = 0; info->dvi_dual = false; info->is_hdmi = false; + info->has_audio = false; info->has_hdmi_infoframe = false; info->rgb_quant_range_selectable = false; memset(&info->hdmi, 0, sizeof(info->hdmi)); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c58d7b193664..1c9ed4c52760 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -23,10 +23,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror -# Fine grained warnings disable -CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) -CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) - subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
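The drm_dsc_helper.c hunks above add drm_dsc_set_rc_buf_thresh() and drm_dsc_setup_rc_params() so drivers can pull the recommended DSC rate-control tables from shared code instead of duplicating them. A rough, hypothetical sketch of a caller follows; the function name, the chosen 8 bpc / 8 bpp operating point, and the assumption that the slice/picture geometry fields of the config are already populated are illustrative and not part of this patch:

#include <drm/display/drm_dsc.h>
#include <drm/display/drm_dsc_helper.h>

/* Hypothetical sketch: fill RC parameters using the new shared helpers. */
static int example_setup_dsc_rc(struct drm_dsc_config *vdsc_cfg)
{
	int ret;

	/*
	 * drm_dsc_setup_rc_params() requires bits_per_component and
	 * bits_per_pixel (U6.4 fixed point) to be set beforehand.
	 */
	vdsc_cfg->bits_per_component = 8;
	vdsc_cfg->bits_per_pixel = 8 << 4;

	/* Program the (mostly constant) RC buffer thresholds. */
	drm_dsc_set_rc_buf_thresh(vdsc_cfg);

	/* Look up the recommended DSC 1.2 4:4:4 RC parameters. */
	ret = drm_dsc_setup_rc_params(vdsc_cfg, DRM_DSC_1_2_444);
	if (ret)
		return ret;

	/*
	 * Derive the remaining rate-control values; this assumes the
	 * slice/picture geometry fields were already filled by the driver.
	 */
	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}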
@@ -238,9 +234,12 @@ i915-y += \ display/intel_crtc_state_dump.o \ display/intel_cursor.o \ display/intel_display.o \ + display/intel_display_driver.o \ + display/intel_display_irq.o \ display/intel_display_power.o \ display/intel_display_power_map.o \ display/intel_display_power_well.o \ + display/intel_display_reset.o \ display/intel_display_rps.o \ display/intel_dmc.o \ display/intel_dpio_phy.o \ @@ -259,8 +258,11 @@ i915-y += \ display/intel_hdcp.o \ display/intel_hdcp_gsc.o \ display/intel_hotplug.o \ + display/intel_hotplug_irq.o \ display/intel_hti.o \ + display/intel_load_detect.o \ display/intel_lpe_audio.o \ + display/intel_modeset_lock.o \ display/intel_modeset_verify.o \ display/intel_modeset_setup.o \ display/intel_overlay.o \ @@ -299,8 +301,10 @@ i915-y += \ display/icl_dsi.o \ display/intel_backlight.o \ display/intel_crt.o \ + display/intel_cx0_phy.o \ display/intel_ddi.o \ display/intel_ddi_buf_trans.o \ + display/intel_display_device.o \ display/intel_display_trace.o \ display/intel_dkl_phy.o \ display/intel_dp.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 920d570f7594..112d91d81fdc 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -169,7 +169,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(dev_priv, cur_state != state, "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", dig_port->base.base.base.id, dig_port->base.base.name, str_on_off(state), str_on_off(cur_state)); @@ -180,7 +180,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) { bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(dev_priv, cur_state != state, "eDP PLL state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 448ea26786e0..5c187e6e0472 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -7,6 +7,7 @@ #include "g4x_hdmi.h" #include "i915_reg.h" +#include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" #include "intel_crtc.h" @@ -80,15 +81,67 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, return ret; } +static bool connector_is_hdmi(struct drm_connector *connector) +{ + struct intel_encoder *encoder = + intel_attached_encoder(to_intel_connector(connector)); + + return encoder && encoder->type == INTEL_OUTPUT_HDMI; +} + +static bool g4x_compute_has_hdmi_sink(struct intel_atomic_state *state, + struct intel_crtc *this_crtc) +{ + const struct drm_connector_state *conn_state; + struct drm_connector *connector; + int i; + + /* + * On g4x only one HDMI port can transmit infoframes/audio at + * any given time. Select the first suitable port for this duty. + * + * See also g4x_hdmi_connector_atomic_check(). 
+ */ + for_each_new_connector_in_state(&state->base, connector, conn_state, i) { + struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (!connector_is_hdmi(connector)) + continue; + + crtc = to_intel_crtc(conn_state->crtc); + if (!crtc) + continue; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + if (!intel_hdmi_compute_has_hdmi_sink(encoder, crtc_state, conn_state)) + continue; + + return crtc == this_crtc; + } + + return false; +} + static int g4x_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (HAS_PCH_SPLIT(i915)) crtc_state->has_pch_encoder = true; + if (IS_G4X(i915)) + crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc); + else + crtc_state->has_hdmi_sink = + intel_hdmi_compute_has_hdmi_sink(encoder, crtc_state, conn_state); + return intel_hdmi_compute_config(encoder, crtc_state, conn_state); } @@ -546,6 +599,66 @@ intel_hdmi_hotplug(struct intel_encoder *encoder, return state; } +int g4x_hdmi_connector_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->dev); + struct drm_connector_list_iter conn_iter; + struct drm_connector *conn; + int ret; + + ret = intel_digital_connector_atomic_check(connector, state); + if (ret) + return ret; + + if (!IS_G4X(i915)) + return 0; + + if (!intel_connector_needs_modeset(to_intel_atomic_state(state), connector)) + return 0; + + /* + * On g4x only one HDMI port can transmit infoframes/audio + * at any given time. Make sure all enabled HDMI ports are + * included in the state so that it's possible to select + * one of them for this duty. + * + * See also g4x_compute_has_hdmi_sink(). 
+ */ + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_for_each_connector_iter(conn, &conn_iter) { + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + + if (!connector_is_hdmi(conn)) + continue; + + drm_dbg_kms(&i915->drm, "Adding [CONNECTOR:%d:%s]\n", + conn->base.id, conn->name); + + conn_state = drm_atomic_get_connector_state(state, conn); + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); + break; + } + + crtc = conn_state->crtc; + if (!crtc) + continue; + + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + crtc_state->mode_changed = true; + + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + break; + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h index db9a93bc9321..1e3ea7f3c846 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.h +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h @@ -11,9 +11,13 @@ #include "i915_reg_defs.h" enum port; +struct drm_atomic_state; +struct drm_connector; struct drm_i915_private; void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port); +int g4x_hdmi_connector_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state); #endif diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index ecaeb7dc196b..616654adbfb8 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -8,12 +8,12 @@ #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> -#include "i915_irq.h" #include "i915_reg.h" #include "i9xx_plane.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fbc.h" diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index caef72d38798..af0c79a4c9a4 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -4,6 +4,7 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_display.h" @@ -3447,9 +3448,10 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv) drm_modeset_acquire_init(&ctx, 0); -retry: state->acquire_ctx = &ctx; + to_intel_atomic_state(state)->internal = true; +retry: /* * Hardware readout is the only time we don't want to calculate * intermediate watermarks (since we don't trust the current diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h index a7875cbcd05a..b87ae369685a 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.h +++ b/drivers/gpu/drm/i915/display/i9xx_wm.h @@ -12,7 +12,6 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_plane_state; -int ilk_wm_max_level(const struct drm_i915_private *i915); bool ilk_disable_lp_wm(struct drm_i915_private *i915); void ilk_wm_sanitize(struct drm_i915_private *i915); bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable); diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index c9aeba0ecf91..c133928a0655 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1591,6 +1591,7 @@ static int gen11_dsi_compute_config(struct intel_encoder 
*encoder, &pipe_config->hw.adjusted_mode; int ret; + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; ret = intel_panel_compute_config(intel_connector, adjusted_mode); diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index a9a3f3715279..7cf51dd8c056 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -265,7 +265,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_wm_post = false; crtc_state->fifo_changed = false; crtc_state->preload_luts = false; - crtc_state->inherited = false; crtc_state->wm.need_postvbl_update = false; crtc_state->do_async_flip = false; crtc_state->fb_bits = 0; @@ -311,262 +310,6 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, kfree(crtc_state); } -static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, - int num_scalers_need, struct intel_crtc *intel_crtc, - const char *name, int idx, - struct intel_plane_state *plane_state, - int *scaler_id) -{ - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - int j; - u32 mode; - - if (*scaler_id < 0) { - /* find a free scaler */ - for (j = 0; j < intel_crtc->num_scalers; j++) { - if (scaler_state->scalers[j].in_use) - continue; - - *scaler_id = j; - scaler_state->scalers[*scaler_id].in_use = 1; - break; - } - } - - if (drm_WARN(&dev_priv->drm, *scaler_id < 0, - "Cannot find scaler for %s:%d\n", name, idx)) - return -EINVAL; - - /* set scaler mode */ - if (plane_state && plane_state->hw.fb && - plane_state->hw.fb->format->is_yuv && - plane_state->hw.fb->format->num_planes > 1) { - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - if (DISPLAY_VER(dev_priv) == 9) { - mode = SKL_PS_SCALER_MODE_NV12; - } else if (icl_is_hdr_plane(dev_priv, plane->id)) { - /* - * On gen11+'s HDR planes we only use the scaler for - * scaling. They have a dedicated chroma upsampler, so - * we don't need the scaler to upsample the UV plane. - */ - mode = PS_SCALER_MODE_NORMAL; - } else { - struct intel_plane *linked = - plane_state->planar_linked_plane; - - mode = PS_SCALER_MODE_PLANAR; - - if (linked) - mode |= PS_PLANE_Y_SEL(linked->id); - } - } else if (DISPLAY_VER(dev_priv) >= 10) { - mode = PS_SCALER_MODE_NORMAL; - } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) { - /* - * when only 1 scaler is in use on a pipe with 2 scalers - * scaler 0 operates in high quality (HQ) mode. - * In this case use scaler 0 to take advantage of HQ mode - */ - scaler_state->scalers[*scaler_id].in_use = 0; - *scaler_id = 0; - scaler_state->scalers[0].in_use = 1; - mode = SKL_PS_SCALER_MODE_HQ; - } else { - mode = SKL_PS_SCALER_MODE_DYN; - } - - /* - * FIXME: we should also check the scaler factors for pfit, so - * this shouldn't be tied directly to planes. - */ - if (plane_state && plane_state->hw.fb) { - const struct drm_framebuffer *fb = plane_state->hw.fb; - const struct drm_rect *src = &plane_state->uapi.src; - const struct drm_rect *dst = &plane_state->uapi.dst; - int hscale, vscale, max_vscale, max_hscale; - - /* - * FIXME: When two scalers are needed, but only one of - * them needs to downscale, we should make sure that - * the one that needs downscaling support is assigned - * as the first scaler, so we don't reject downscaling - * unnecessarily. 
- */ - - if (DISPLAY_VER(dev_priv) >= 14) { - /* - * On versions 14 and up, only the first - * scaler supports a vertical scaling factor - * of more than 1.0, while a horizontal - * scaling factor of 3.0 is supported. - */ - max_hscale = 0x30000 - 1; - if (*scaler_id == 0) - max_vscale = 0x30000 - 1; - else - max_vscale = 0x10000; - - } else if (DISPLAY_VER(dev_priv) >= 10 || - !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { - max_hscale = 0x30000 - 1; - max_vscale = 0x30000 - 1; - } else { - max_hscale = 0x20000 - 1; - max_vscale = 0x20000 - 1; - } - - /* - * FIXME: We should change the if-else block above to - * support HQ vs dynamic scaler properly. - */ - - /* Check if required scaling is within limits */ - hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale); - vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale); - - if (hscale < 0 || vscale < 0) { - drm_dbg_kms(&dev_priv->drm, - "Scaler %d doesn't support required plane scaling\n", - *scaler_id); - drm_rect_debug_print("src: ", src, true); - drm_rect_debug_print("dst: ", dst, false); - - return -EINVAL; - } - } - - drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n", - intel_crtc->pipe, *scaler_id, name, idx); - scaler_state->scalers[*scaler_id].mode = mode; - - return 0; -} - -/** - * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests - * @dev_priv: i915 device - * @intel_crtc: intel crtc - * @crtc_state: incoming crtc_state to validate and setup scalers - * - * This function sets up scalers based on staged scaling requests for - * a @crtc and its planes. It is called from crtc level check path. If request - * is a supportable request, it attaches scalers to requested planes and crtc. - * - * This function takes into account the current scaler(s) in use by any planes - * not being part of this atomic state - * - * Returns: - * 0 - scalers were setup succesfully - * error code - otherwise - */ -int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, - struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_plane *plane = NULL; - struct intel_plane *intel_plane; - struct intel_plane_state *plane_state = NULL; - struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - struct drm_atomic_state *drm_state = crtc_state->uapi.state; - struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); - int num_scalers_need; - int i; - - num_scalers_need = hweight32(scaler_state->scaler_users); - - /* - * High level flow: - * - staged scaler requests are already in scaler_state->scaler_users - * - check whether staged scaling requests can be supported - * - add planes using scalers that aren't in current transaction - * - assign scalers to requested users - * - as part of plane commit, scalers will be committed - * (i.e., either attached or detached) to respective planes in hw - * - as part of crtc_commit, scaler will be either attached or detached - * to crtc in hw - */ - - /* fail if required scalers > available scalers */ - if (num_scalers_need > intel_crtc->num_scalers){ - drm_dbg_kms(&dev_priv->drm, - "Too many scaling requests %d > %d\n", - num_scalers_need, intel_crtc->num_scalers); - return -EINVAL; - } - - /* walkthrough scaler_users bits and start assigning scalers */ - for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { - int *scaler_id; - const char *name; - int idx, ret; - - /* skip if scaler not required */ - if (!(scaler_state->scaler_users & (1 << i))) - continue; - - if (i == 
SKL_CRTC_INDEX) { - name = "CRTC"; - idx = intel_crtc->base.base.id; - - /* panel fitter case: assign as a crtc scaler */ - scaler_id = &scaler_state->scaler_id; - } else { - name = "PLANE"; - - /* plane scaler case: assign as a plane scaler */ - /* find the plane that set the bit as scaler_user */ - plane = drm_state->planes[i].ptr; - - /* - * to enable/disable hq mode, add planes that are using scaler - * into this transaction - */ - if (!plane) { - struct drm_plane_state *state; - - /* - * GLK+ scalers don't have a HQ mode so it - * isn't necessary to change between HQ and dyn mode - * on those platforms. - */ - if (DISPLAY_VER(dev_priv) >= 10) - continue; - - plane = drm_plane_from_index(&dev_priv->drm, i); - state = drm_atomic_get_plane_state(drm_state, plane); - if (IS_ERR(state)) { - drm_dbg_kms(&dev_priv->drm, - "Failed to add [PLANE:%d] to drm_state\n", - plane->base.id); - return PTR_ERR(state); - } - } - - intel_plane = to_intel_plane(plane); - idx = plane->base.id; - - /* plane on different crtc cannot be a scaler user of this crtc */ - if (drm_WARN_ON(&dev_priv->drm, - intel_plane->pipe != intel_crtc->pipe)) - continue; - - plane_state = intel_atomic_get_new_plane_state(intel_state, - intel_plane); - scaler_id = &plane_state->scaler_id; - } - - ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need, - intel_crtc, name, idx, - plane_state, scaler_id); - if (ret < 0) - return ret; - } - - return 0; -} - struct drm_atomic_state * intel_atomic_state_alloc(struct drm_device *dev) { @@ -599,6 +342,8 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) drm_atomic_state_default_clear(&state->base); intel_atomic_clear_global_state(state); + /* state->internal not reset on purpose */ + state->dpll_set = state->modeset = false; } diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 1dc439983dd9..e506f6a87344 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -52,8 +52,4 @@ struct intel_crtc_state * intel_atomic_get_crtc_state(struct drm_atomic_state *state, struct intel_crtc *crtc); -int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, - struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state); - #endif /* __INTEL_ATOMIC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index f33164b10292..4125ee07a271 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -36,6 +36,7 @@ #include <drm/drm_fourcc.h> #include "i915_config.h" +#include "i915_reg.h" #include "intel_atomic_plane.h" #include "intel_cdclk.h" #include "intel_display_rps.h" diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 3d5a9bbc6fde..3d9c9b4f27f8 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -1039,6 +1039,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *i915, return; state->acquire_ctx = &ctx; + to_intel_atomic_state(state)->internal = true; retry: ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc, diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 75e69dffc5e9..34a397adbd6b 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2141,58 +2141,58 @@ static u8 translate_iboost(u8 
val) static const u8 cnp_ddc_pin_map[] = { [0] = 0, /* N/A */ - [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT, - [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT, - [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */ - [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */ + [GMBUS_PIN_1_BXT] = DDC_BUS_DDI_B, + [GMBUS_PIN_2_BXT] = DDC_BUS_DDI_C, + [GMBUS_PIN_4_CNP] = DDC_BUS_DDI_D, /* sic */ + [GMBUS_PIN_3_BXT] = DDC_BUS_DDI_F, /* sic */ }; static const u8 icp_ddc_pin_map[] = { - [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, - [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, - [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT, - [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP, - [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP, - [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP, - [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP, - [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP, - [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP, + [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, + [GMBUS_PIN_2_BXT] = ICL_DDC_BUS_DDI_B, + [GMBUS_PIN_3_BXT] = TGL_DDC_BUS_DDI_C, + [GMBUS_PIN_9_TC1_ICP] = ICL_DDC_BUS_PORT_1, + [GMBUS_PIN_10_TC2_ICP] = ICL_DDC_BUS_PORT_2, + [GMBUS_PIN_11_TC3_ICP] = ICL_DDC_BUS_PORT_3, + [GMBUS_PIN_12_TC4_ICP] = ICL_DDC_BUS_PORT_4, + [GMBUS_PIN_13_TC5_TGP] = TGL_DDC_BUS_PORT_5, + [GMBUS_PIN_14_TC6_TGP] = TGL_DDC_BUS_PORT_6, }; static const u8 rkl_pch_tgp_ddc_pin_map[] = { - [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, - [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, - [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP, - [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP, + [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, + [GMBUS_PIN_2_BXT] = ICL_DDC_BUS_DDI_B, + [GMBUS_PIN_9_TC1_ICP] = RKL_DDC_BUS_DDI_D, + [GMBUS_PIN_10_TC2_ICP] = RKL_DDC_BUS_DDI_E, }; static const u8 adls_ddc_pin_map[] = { - [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, - [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, - [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, - [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, - [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, + [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, + [GMBUS_PIN_9_TC1_ICP] = ADLS_DDC_BUS_PORT_TC1, + [GMBUS_PIN_10_TC2_ICP] = ADLS_DDC_BUS_PORT_TC2, + [GMBUS_PIN_11_TC3_ICP] = ADLS_DDC_BUS_PORT_TC3, + [GMBUS_PIN_12_TC4_ICP] = ADLS_DDC_BUS_PORT_TC4, }; static const u8 gen9bc_tgp_ddc_pin_map[] = { - [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, - [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP, - [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP, + [GMBUS_PIN_2_BXT] = DDC_BUS_DDI_B, + [GMBUS_PIN_9_TC1_ICP] = DDC_BUS_DDI_C, + [GMBUS_PIN_10_TC2_ICP] = DDC_BUS_DDI_D, }; static const u8 adlp_ddc_pin_map[] = { - [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, - [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, - [ADLP_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, - [ADLP_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, - [ADLP_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, - [ADLP_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, + [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, + [GMBUS_PIN_2_BXT] = ICL_DDC_BUS_DDI_B, + [GMBUS_PIN_9_TC1_ICP] = ADLP_DDC_BUS_PORT_TC1, + [GMBUS_PIN_10_TC2_ICP] = ADLP_DDC_BUS_PORT_TC2, + [GMBUS_PIN_11_TC3_ICP] = ADLP_DDC_BUS_PORT_TC3, + [GMBUS_PIN_12_TC4_ICP] = ADLP_DDC_BUS_PORT_TC4, }; static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) { const u8 *ddc_pin_map; - int n_entries; + int i, n_entries; if (HAS_PCH_MTP(i915) || IS_ALDERLAKE_P(i915)) { ddc_pin_map = adlp_ddc_pin_map; @@ -2219,8 +2219,10 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) return vbt_pin; } - if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0) - return ddc_pin_map[vbt_pin]; + for (i = 0; i < n_entries; i++) { + if (ddc_pin_map[i] == vbt_pin) + return i; + } 
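+ /*
+  * The pin maps above are indexed by the platform GMBUS pin and store the
+  * raw VBT DDC pin value, so a successful lookup returns the index.  Falling
+  * out of the loop means the VBT pin is unknown on this platform and the
+  * alternate pin is ignored below.
+  */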
drm_dbg_kms(&i915->drm, "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", @@ -2675,8 +2677,9 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata, supports_tbt = intel_bios_encoder_supports_tbt(devdata); drm_dbg_kms(&i915->drm, - "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", + "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d DP++:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi, + intel_bios_encoder_supports_dp_dual_mode(devdata), intel_bios_encoder_is_lspcon(devdata), supports_typec_usb, supports_tbt, devdata->dsc != NULL); @@ -3030,6 +3033,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) return vbt; } +static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset) +{ + intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset); + + return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER); +} + static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) { u32 count, data, found, store = 0; @@ -3046,9 +3056,7 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) oprom_offset &= OROM_OFFSET_MASK; for (count = 0; count < oprom_size; count += 4) { - intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, oprom_offset + count); - data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); - + data = intel_spi_read(&i915->uncore, oprom_offset + count); if (data == *((const u32 *)"$VBT")) { found = oprom_offset + count; break; @@ -3059,20 +3067,16 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) goto err_not_found; /* Get VBT size and allocate space for the VBT */ - intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + - offsetof(struct vbt_header, vbt_size)); - vbt_size = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); + vbt_size = intel_spi_read(&i915->uncore, + found + offsetof(struct vbt_header, vbt_size)); vbt_size &= 0xffff; vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); if (!vbt) goto err_not_found; - for (count = 0; count < vbt_size; count += 4) { - intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + count); - data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); - *(vbt + store++) = data; - } + for (count = 0; count < vbt_size; count += 4) + *(vbt + store++) = intel_spi_read(&i915->uncore, found + count); if (!intel_bios_is_valid_vbt(vbt, vbt_size)) goto err_free_vbt; @@ -3424,7 +3428,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) return false; } -static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) +bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) { const struct child_device_config *child = &devdata->child; @@ -3443,15 +3447,6 @@ static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_enc return false; } -bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, - enum port port) -{ - const struct intel_bios_encoder_data *devdata = - intel_bios_encoder_data_lookup(i915, port); - - return devdata && intel_bios_encoder_supports_dp_dual_mode(devdata); -} - /** * intel_bios_is_dsi_present - is DSI present in VBT * @i915: i915 device instance @@ -3578,84 +3573,82 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, return false; } +static const u8 adlp_aux_ch_map[] = { + [AUX_CH_A] = DP_AUX_A, + [AUX_CH_B] = 
DP_AUX_B, + [AUX_CH_C] = DP_AUX_C, + [AUX_CH_D_XELPD] = DP_AUX_D, + [AUX_CH_E_XELPD] = DP_AUX_E, + [AUX_CH_USBC1] = DP_AUX_F, + [AUX_CH_USBC2] = DP_AUX_G, + [AUX_CH_USBC3] = DP_AUX_H, + [AUX_CH_USBC4] = DP_AUX_I, +}; + +/* + * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E + * map to DDI A,TC1,TC2,TC3,TC4 respectively. + */ +static const u8 adls_aux_ch_map[] = { + [AUX_CH_A] = DP_AUX_A, + [AUX_CH_USBC1] = DP_AUX_B, + [AUX_CH_USBC2] = DP_AUX_C, + [AUX_CH_USBC3] = DP_AUX_D, + [AUX_CH_USBC4] = DP_AUX_E, +}; + +/* + * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D + * map to DDI A,B,TC1,TC2 respectively. + */ +static const u8 rkl_aux_ch_map[] = { + [AUX_CH_A] = DP_AUX_A, + [AUX_CH_B] = DP_AUX_B, + [AUX_CH_USBC1] = DP_AUX_C, + [AUX_CH_USBC2] = DP_AUX_D, +}; + +static const u8 direct_aux_ch_map[] = { + [AUX_CH_A] = DP_AUX_A, + [AUX_CH_B] = DP_AUX_B, + [AUX_CH_C] = DP_AUX_C, + [AUX_CH_D] = DP_AUX_D, /* aka AUX_CH_USBC1 */ + [AUX_CH_E] = DP_AUX_E, /* aka AUX_CH_USBC2 */ + [AUX_CH_F] = DP_AUX_F, /* aka AUX_CH_USBC3 */ + [AUX_CH_G] = DP_AUX_G, /* aka AUX_CH_USBC4 */ + [AUX_CH_H] = DP_AUX_H, /* aka AUX_CH_USBC5 */ + [AUX_CH_I] = DP_AUX_I, /* aka AUX_CH_USBC6 */ +}; + static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel) { - enum aux_ch aux_ch; + const u8 *aux_ch_map; + int i, n_entries; - /* - * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D - * map to DDI A,B,TC1,TC2 respectively. - * - * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E - * map to DDI A,TC1,TC2,TC3,TC4 respectively. - */ - switch (aux_channel) { - case DP_AUX_A: - aux_ch = AUX_CH_A; - break; - case DP_AUX_B: - if (IS_ALDERLAKE_S(i915)) - aux_ch = AUX_CH_USBC1; - else - aux_ch = AUX_CH_B; - break; - case DP_AUX_C: - if (IS_ALDERLAKE_S(i915)) - aux_ch = AUX_CH_USBC2; - else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) - aux_ch = AUX_CH_USBC1; - else - aux_ch = AUX_CH_C; - break; - case DP_AUX_D: - if (DISPLAY_VER(i915) >= 13) - aux_ch = AUX_CH_D_XELPD; - else if (IS_ALDERLAKE_S(i915)) - aux_ch = AUX_CH_USBC3; - else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) - aux_ch = AUX_CH_USBC2; - else - aux_ch = AUX_CH_D; - break; - case DP_AUX_E: - if (DISPLAY_VER(i915) >= 13) - aux_ch = AUX_CH_E_XELPD; - else if (IS_ALDERLAKE_S(i915)) - aux_ch = AUX_CH_USBC4; - else - aux_ch = AUX_CH_E; - break; - case DP_AUX_F: - if (DISPLAY_VER(i915) >= 13) - aux_ch = AUX_CH_USBC1; - else - aux_ch = AUX_CH_F; - break; - case DP_AUX_G: - if (DISPLAY_VER(i915) >= 13) - aux_ch = AUX_CH_USBC2; - else - aux_ch = AUX_CH_G; - break; - case DP_AUX_H: - if (DISPLAY_VER(i915) >= 13) - aux_ch = AUX_CH_USBC3; - else - aux_ch = AUX_CH_H; - break; - case DP_AUX_I: - if (DISPLAY_VER(i915) >= 13) - aux_ch = AUX_CH_USBC4; - else - aux_ch = AUX_CH_I; - break; - default: - MISSING_CASE(aux_channel); - aux_ch = AUX_CH_A; - break; + if (DISPLAY_VER(i915) >= 13) { + aux_ch_map = adlp_aux_ch_map; + n_entries = ARRAY_SIZE(adlp_aux_ch_map); + } else if (IS_ALDERLAKE_S(i915)) { + aux_ch_map = adls_aux_ch_map; + n_entries = ARRAY_SIZE(adls_aux_ch_map); + } else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) { + aux_ch_map = rkl_aux_ch_map; + n_entries = ARRAY_SIZE(rkl_aux_ch_map); + } else { + aux_ch_map = direct_aux_ch_map; + n_entries = ARRAY_SIZE(direct_aux_ch_map); + } + + for (i = 0; i < n_entries; i++) { + if (aux_ch_map[i] == aux_channel) + return i; } - return aux_ch; + drm_dbg_kms(&i915->drm, + "Ignoring alternate AUX CH: VBT claims AUX 0x%x, which is not valid for this platform\n", + aux_channel); + + return AUX_CH_NONE; } enum aux_ch 
intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata) diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 8a0730c9b48c..45fae97d9719 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -247,7 +247,6 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); -bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, @@ -264,6 +263,7 @@ bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdat bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 202321ffbe2a..597d5816ad1b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -150,6 +150,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, { int ret; + if (DISPLAY_VER(dev_priv) >= 14) + return 0; + /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, points_mask, diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 084a483f9776..1a5268e3d0a3 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1453,6 +1453,18 @@ static u8 tgl_calc_voltage_level(int cdclk) return 0; } +static u8 rplu_calc_voltage_level(int cdclk) +{ + if (cdclk > 556800) + return 3; + else if (cdclk > 480000) + return 2; + else if (cdclk > 312000) + return 1; + else + return 0; +} + static void icl_readout_refclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { @@ -1896,7 +1908,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * mailbox communication, skip * this step. 
*/ - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv)) /* NOOP */; else if (DISPLAY_VER(dev_priv) >= 11) ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, @@ -1932,10 +1944,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * NOOP - No Pcode communication needed for * Display versions 14 and beyond */; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(dev_priv) >= 11 && !IS_DG2(dev_priv)) ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); - else + if (DISPLAY_VER(dev_priv) < 11) { /* * The timeout isn't specified, the 2ms used here is based on * experiment. @@ -1946,7 +1958,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, cdclk_config->voltage_level, 150, 2); - + } if (ret) { drm_err(&dev_priv->drm, "PCode CDCLK freq set failed, (err %d, freq %d)\n", @@ -2242,6 +2254,38 @@ void intel_cdclk_dump_config(struct drm_i915_private *i915, cdclk_config->voltage_level); } +static void intel_pcode_notify(struct drm_i915_private *i915, + u8 voltage_level, + u8 active_pipe_count, + u16 cdclk, + bool cdclk_update_valid, + bool pipe_count_update_valid) +{ + int ret; + u32 update_mask = 0; + + if (!IS_DG2(i915)) + return; + + update_mask = DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, active_pipe_count, voltage_level); + + if (cdclk_update_valid) + update_mask |= DISPLAY_TO_PCODE_CDCLK_VALID; + + if (pipe_count_update_valid) + update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID; + + ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE | + update_mask, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); + if (ret) + drm_err(&i915->drm, + "Failed to inform PCU about display config (err %d)\n", + ret); +} + /** * intel_set_cdclk - Push the CDCLK configuration to the hardware * @dev_priv: i915 device @@ -2311,6 +2355,88 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, } } +static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_cdclk_state *old_cdclk_state = + intel_atomic_get_old_cdclk_state(state); + const struct intel_cdclk_state *new_cdclk_state = + intel_atomic_get_new_cdclk_state(state); + unsigned int cdclk = 0; u8 voltage_level, num_active_pipes = 0; + bool change_cdclk, update_pipe_count; + + if (!intel_cdclk_changed(&old_cdclk_state->actual, + &new_cdclk_state->actual) && + new_cdclk_state->active_pipes == + old_cdclk_state->active_pipes) + return; + + /* According to "Sequence Before Frequency Change", voltage level set to 0x3 */ + voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX; + + change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk; + update_pipe_count = hweight8(new_cdclk_state->active_pipes) > + hweight8(old_cdclk_state->active_pipes); + + /* + * According to "Sequence Before Frequency Change", + * if CDCLK is increasing, set bits 25:16 to upcoming CDCLK, + * if CDCLK is decreasing or not changing, set bits 25:16 to current CDCLK, + * which basically means we choose the maximum of old and new CDCLK, if we know both + */ + if (change_cdclk) + cdclk = max(new_cdclk_state->actual.cdclk, old_cdclk_state->actual.cdclk); + + /* + * According to "Sequence For Pipe Count Change", + * if pipe count is increasing, set bits 25:16 to upcoming pipe count + * (power well is enabled) + * no action if it is decreasing, before the change + */ + if 
(update_pipe_count) + num_active_pipes = hweight8(new_cdclk_state->active_pipes); + + intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, + change_cdclk, update_pipe_count); +} + +static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_cdclk_state *new_cdclk_state = + intel_atomic_get_new_cdclk_state(state); + const struct intel_cdclk_state *old_cdclk_state = + intel_atomic_get_old_cdclk_state(state); + unsigned int cdclk = 0; u8 voltage_level, num_active_pipes = 0; + bool update_cdclk, update_pipe_count; + + /* According to "Sequence After Frequency Change", set voltage to used level */ + voltage_level = new_cdclk_state->actual.voltage_level; + + update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk; + update_pipe_count = hweight8(new_cdclk_state->active_pipes) < + hweight8(old_cdclk_state->active_pipes); + + /* + * According to "Sequence After Frequency Change", + * set bits 25:16 to current CDCLK + */ + if (update_cdclk) + cdclk = new_cdclk_state->actual.cdclk; + + /* + * According to "Sequence For Pipe Count Change", + * if pipe count is decreasing, set bits 25:16 to current pipe count, + * after the change(power well is disabled) + * no action if it is increasing, after the change + */ + if (update_pipe_count) + num_active_pipes = hweight8(new_cdclk_state->active_pipes); + + intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, + update_cdclk, update_pipe_count); +} + /** * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware * @state: intel atomic state @@ -2321,7 +2447,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2332,11 +2458,14 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) &new_cdclk_state->actual)) return; + if (IS_DG2(i915)) + intel_cdclk_pcode_pre_notify(state); + if (pipe == INVALID_PIPE || old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) { - drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); + drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); + intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); } } @@ -2350,7 +2479,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2361,11 +2490,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) &new_cdclk_state->actual)) return; + if (IS_DG2(i915)) + intel_cdclk_pcode_post_notify(state); + if (pipe != INVALID_PIPE && old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) { - drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); + drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); + intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); 
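+ /*
+  * When the update is synchronized to a pipe, CDCLK decreases are applied
+  * only here, after the plane update; increases were already handled in
+  * intel_set_cdclk_pre_plane_update(), so the higher of the two frequencies
+  * stays programmed across the plane update itself.
+  */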
} } @@ -2871,6 +3003,21 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv) return 0; } +static bool intel_cdclk_need_serialize(struct drm_i915_private *i915, + const struct intel_cdclk_state *old_cdclk_state, + const struct intel_cdclk_state *new_cdclk_state) +{ + bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) != + hweight8(new_cdclk_state->active_pipes); + bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual, + &new_cdclk_state->actual); + /* + * We need to poke hw for gen >= 12, because we notify PCode if + * pipe power well count changes. + */ + return cdclk_changed || (IS_DG2(i915) && power_well_cnt_changed); +} + int intel_modeset_calc_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); @@ -2892,8 +3039,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) if (ret) return ret; - if (intel_cdclk_changed(&old_cdclk_state->actual, - &new_cdclk_state->actual)) { + if (intel_cdclk_need_serialize(dev_priv, old_cdclk_state, new_cdclk_state)) { /* * Also serialize commits across all crtcs * if the actual hw needs to be poked. @@ -3235,6 +3381,27 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) return freq; } +static int i915_cdclk_info_show(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = m->private; + + seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); + seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); + seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info); + +void intel_cdclk_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root, + i915, &i915_cdclk_info_fops); +} + static const struct intel_cdclk_funcs mtl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, @@ -3242,6 +3409,13 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = { .calc_voltage_level = tgl_calc_voltage_level, }; +static const struct intel_cdclk_funcs rplu_cdclk_funcs = { + .get_cdclk = bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = rplu_calc_voltage_level, +}; + static const struct intel_cdclk_funcs tgl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, @@ -3384,14 +3558,17 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; dev_priv->display.cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; /* Wa_22011320316:adl-p[a0] */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; - else if (IS_ADLP_RPLU(dev_priv)) + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + } else if (IS_ADLP_RPLU(dev_priv)) { dev_priv->display.cdclk.table = rplu_cdclk_table; - else + dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; + } else { dev_priv->display.cdclk.table = adlp_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + } } else if (IS_ROCKETLAKE(dev_priv)) { dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; dev_priv->display.cdclk.table = rkl_cdclk_table; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h 
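For reference, the i915_cdclk_info node added above lives under the primary DRM minor's debugfs directory (e.g. /sys/kernel/debug/dri/0/i915_cdclk_info) and, using the format strings from i915_cdclk_info_show(), reads back along these lines (the frequencies are example values only):
Current CD clock frequency: 480000 kHz
Max CD clock frequency: 652800 kHz
Max pixel clock frequency: 1305600 kHz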
index 51e2f6a11ce4..48fd7d39e0cd 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -82,5 +82,6 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state); to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) int intel_cdclk_init(struct drm_i915_private *dev_priv); +void intel_cdclk_debugfs_register(struct drm_i915_private *i915); #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 36aac88143ac..8966e6560516 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -70,6 +70,11 @@ struct intel_color_funcs { const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut); + /* + * Read out the CSCs (if any) from the hardware into the + * software state. Used by eg. the hardware state checker. + */ + void (*read_csc)(struct intel_crtc_state *crtc_state); }; #define CTM_COEFF_SIGN (1ULL << 63) @@ -116,46 +121,52 @@ struct intel_color_funcs { #define ILK_CSC_COEFF_FP(coeff, fbits) \ (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8) -#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0 #define ILK_CSC_COEFF_1_0 0x7800 - -#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255) - -/* Nop pre/post offsets */ -static const u16 ilk_csc_off_zero[3] = {}; - -/* Identity matrix */ -static const u16 ilk_csc_coeff_identity[9] = { - ILK_CSC_COEFF_1_0, 0, 0, - 0, ILK_CSC_COEFF_1_0, 0, - 0, 0, ILK_CSC_COEFF_1_0, -}; - -/* Limited range RGB post offsets */ -static const u16 ilk_csc_postoff_limited_range[3] = { - ILK_CSC_POSTOFF_LIMITED_RANGE, - ILK_CSC_POSTOFF_LIMITED_RANGE, - ILK_CSC_POSTOFF_LIMITED_RANGE, +#define ILK_CSC_COEFF_LIMITED_RANGE ((235 - 16) << (12 - 8)) /* exponent 0 */ +#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 << (12 - 8)) + +static const struct intel_csc_matrix ilk_csc_matrix_identity = { + .preoff = {}, + .coeff = { + ILK_CSC_COEFF_1_0, 0, 0, + 0, ILK_CSC_COEFF_1_0, 0, + 0, 0, ILK_CSC_COEFF_1_0, + }, + .postoff = {}, }; /* Full range RGB -> limited range RGB matrix */ -static const u16 ilk_csc_coeff_limited_range[9] = { - ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, - 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, - 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, +static const struct intel_csc_matrix ilk_csc_matrix_limited_range = { + .preoff = {}, + .coeff = { + ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, + 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, + 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, + }, + .postoff = { + ILK_CSC_POSTOFF_LIMITED_RANGE, + ILK_CSC_POSTOFF_LIMITED_RANGE, + ILK_CSC_POSTOFF_LIMITED_RANGE, + }, }; /* BT.709 full range RGB -> limited range YCbCr matrix */ -static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = { - 0x1e08, 0x9cc0, 0xb528, - 0x2ba8, 0x09d8, 0x37e8, - 0xbce8, 0x9ad8, 0x1e08, +static const struct intel_csc_matrix ilk_csc_matrix_rgb_to_ycbcr = { + .preoff = {}, + .coeff = { + 0x1e08, 0x9cc0, 0xb528, + 0x2ba8, 0x09d8, 0x37e8, + 0xbce8, 0x9ad8, 0x1e08, + }, + .postoff = { + 0x0800, 0x0100, 0x0800, + }, }; -/* Limited range YCbCr post offsets */ -static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = { - 0x0800, 0x0100, 0x0800, -}; +static void intel_csc_clear(struct intel_csc_matrix *csc) +{ + memset(csc, 0, sizeof(*csc)); +} static bool lut_is_legacy(const struct drm_property_blob *lut) { @@ -189,69 +200,182 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) } static void ilk_update_pipe_csc(struct intel_crtc 
*crtc, - const u16 preoff[3], - const u16 coeff[9], - const u16 postoff[3]) + const struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), preoff[0]); - intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), preoff[1]); - intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), preoff[2]); + intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), csc->preoff[0]); + intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), csc->preoff[1]); + intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), csc->preoff[2]); intel_de_write_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe), - coeff[0] << 16 | coeff[1]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); + csc->coeff[0] << 16 | csc->coeff[1]); + intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe), + csc->coeff[2] << 16); intel_de_write_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe), - coeff[3] << 16 | coeff[4]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); + csc->coeff[3] << 16 | csc->coeff[4]); + intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe), + csc->coeff[5] << 16); intel_de_write_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe), - coeff[6] << 16 | coeff[7]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); + csc->coeff[6] << 16 | csc->coeff[7]); + intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe), + csc->coeff[8] << 16); - if (DISPLAY_VER(i915) >= 7) { - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe), - postoff[0]); - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe), - postoff[1]); - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe), - postoff[2]); - } + if (DISPLAY_VER(i915) < 7) + return; + + intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe), csc->postoff[0]); + intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe), csc->postoff[1]); + intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe), csc->postoff[2]); +} + +static void ilk_read_pipe_csc(struct intel_crtc *crtc, + struct intel_csc_matrix *csc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 tmp; + + csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(pipe)); + csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_ME(pipe)); + csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_LO(pipe)); + + tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe)); + csc->coeff[0] = tmp >> 16; + csc->coeff[1] = tmp & 0xffff; + tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BY(pipe)); + csc->coeff[2] = tmp >> 16; + + tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe)); + csc->coeff[3] = tmp >> 16; + csc->coeff[4] = tmp & 0xffff; + tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BU(pipe)); + csc->coeff[5] = tmp >> 16; + + tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe)); + csc->coeff[6] = tmp >> 16; + csc->coeff[7] = tmp & 0xffff; + tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BV(pipe)); + csc->coeff[8] = tmp >> 16; + + if (DISPLAY_VER(i915) < 7) + return; + + csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_HI(pipe)); + csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_ME(pipe)); + csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_LO(pipe)); +} + +static void ilk_read_csc(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc_state->csc_enable) + ilk_read_pipe_csc(crtc, &crtc_state->csc); +} + +static void skl_read_csc(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + /* + * Display WA 
#1184: skl,glk + * Wa_1406463849: icl + * + * Danger! On SKL-ICL *reads* from the CSC coeff/offset registers + * will disarm an already armed CSC double buffer update. + * So this must not be called while armed. Fortunately the state checker + * readout happens only after the update has been already been latched. + * + * On earlier and later platforms only writes to said registers will + * disarm the update. This is considered normal behavior and also + * happens with various other hardware units. + */ + if (crtc_state->csc_enable) + ilk_read_pipe_csc(crtc, &crtc_state->csc); } static void icl_update_output_csc(struct intel_crtc *crtc, - const u16 preoff[3], - const u16 coeff[9], - const u16 postoff[3]) + const struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); + intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), csc->preoff[0]); + intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), csc->preoff[1]); + intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), csc->preoff[2]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), - coeff[0] << 16 | coeff[1]); + csc->coeff[0] << 16 | csc->coeff[1]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe), - coeff[2] << 16); + csc->coeff[2] << 16); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), - coeff[3] << 16 | coeff[4]); + csc->coeff[3] << 16 | csc->coeff[4]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe), - coeff[5] << 16); + csc->coeff[5] << 16); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), - coeff[6] << 16 | coeff[7]); + csc->coeff[6] << 16 | csc->coeff[7]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe), - coeff[8] << 16); + csc->coeff[8] << 16); + + intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), csc->postoff[0]); + intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), csc->postoff[1]); + intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), csc->postoff[2]); +} + +static void icl_read_output_csc(struct intel_crtc *crtc, + struct intel_csc_matrix *csc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 tmp; + + csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe)); + csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe)); + csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe)); + + tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe)); + csc->coeff[0] = tmp >> 16; + csc->coeff[1] = tmp & 0xffff; + tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe)); + csc->coeff[2] = tmp >> 16; + + tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe)); + csc->coeff[3] = tmp >> 16; + csc->coeff[4] = tmp & 0xffff; + tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe)); + csc->coeff[5] = tmp >> 16; + + tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe)); + csc->coeff[6] = tmp >> 16; + csc->coeff[7] = tmp & 0xffff; + tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe)); + csc->coeff[8] = tmp >> 16; - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); + csc->postoff[0] = 
intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe)); + csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe)); + csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe)); +} + +static void icl_read_csc(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + /* + * Wa_1406463849: icl + * + * See skl_read_csc() + */ + if (crtc_state->csc_mode & ICL_CSC_ENABLE) + ilk_read_pipe_csc(crtc, &crtc_state->csc); + + if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) + icl_read_output_csc(crtc, &crtc_state->output_csc); } static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) @@ -294,14 +418,32 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) return !ilk_lut_limited_range(crtc_state); } +static void ilk_csc_copy(struct drm_i915_private *i915, + struct intel_csc_matrix *dst, + const struct intel_csc_matrix *src) +{ + *dst = *src; + + if (DISPLAY_VER(i915) < 7) + memset(dst->postoff, 0, sizeof(dst->postoff)); +} + static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, - u16 coeffs[9], bool limited_color_range) + struct intel_csc_matrix *csc, + bool limited_color_range) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; const u64 *input; u64 temp[9]; int i; + /* for preoff/postoff */ + if (limited_color_range) + ilk_csc_copy(i915, csc, &ilk_csc_matrix_limited_range); + else + ilk_csc_copy(i915, csc, &ilk_csc_matrix_identity); + if (limited_color_range) input = ctm_mult_by_limited(temp, ctm->matrix); else @@ -320,54 +462,49 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, */ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); - coeffs[i] = 0; + csc->coeff[i] = 0; /* sign bit */ if (CTM_COEFF_NEGATIVE(input[i])) - coeffs[i] |= 1 << 15; + csc->coeff[i] |= 1 << 15; if (abs_coeff < CTM_COEFF_0_125) - coeffs[i] |= (3 << 12) | + csc->coeff[i] |= (3 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 12); else if (abs_coeff < CTM_COEFF_0_25) - coeffs[i] |= (2 << 12) | + csc->coeff[i] |= (2 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 11); else if (abs_coeff < CTM_COEFF_0_5) - coeffs[i] |= (1 << 12) | + csc->coeff[i] |= (1 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 10); else if (abs_coeff < CTM_COEFF_1_0) - coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); + csc->coeff[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); else if (abs_coeff < CTM_COEFF_2_0) - coeffs[i] |= (7 << 12) | + csc->coeff[i] |= (7 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 8); else - coeffs[i] |= (6 << 12) | + csc->coeff[i] |= (6 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 7); } } -static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void ilk_assign_csc(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); bool limited_color_range = ilk_csc_limited_range(crtc_state); if (crtc_state->hw.ctm) { - u16 coeff[9]; + drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); - ilk_csc_convert_ctm(crtc_state, coeff, limited_color_range); - ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff, - limited_color_range ? 
- ilk_csc_postoff_limited_range : - ilk_csc_off_zero); + ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, limited_color_range); } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { - ilk_update_pipe_csc(crtc, ilk_csc_off_zero, - ilk_csc_coeff_rgb_to_ycbcr, - ilk_csc_postoff_rgb_to_ycbcr); + drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + + ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr); } else if (limited_color_range) { - ilk_update_pipe_csc(crtc, ilk_csc_off_zero, - ilk_csc_coeff_limited_range, - ilk_csc_postoff_limited_range); + drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + + ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_limited_range); } else if (crtc_state->csc_enable) { /* * On GLK both pipe CSC and degamma LUT are controlled @@ -377,72 +514,267 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) */ drm_WARN_ON(&i915->drm, !IS_GEMINILAKE(i915)); - ilk_update_pipe_csc(crtc, ilk_csc_off_zero, - ilk_csc_coeff_identity, - ilk_csc_off_zero); + ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_identity); + } else { + intel_csc_clear(&crtc_state->csc); } } -static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (crtc_state->csc_enable) + ilk_update_pipe_csc(crtc, &crtc_state->csc); +} + +static void icl_assign_csc(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + if (crtc_state->hw.ctm) { - u16 coeff[9]; + drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0); + + ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, false); + } else { + drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0); - ilk_csc_convert_ctm(crtc_state, coeff, false); - ilk_update_pipe_csc(crtc, ilk_csc_off_zero, - coeff, ilk_csc_off_zero); + intel_csc_clear(&crtc_state->csc); } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { - icl_update_output_csc(crtc, ilk_csc_off_zero, - ilk_csc_coeff_rgb_to_ycbcr, - ilk_csc_postoff_rgb_to_ycbcr); + drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); + + ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr); } else if (crtc_state->limited_color_range) { - icl_update_output_csc(crtc, ilk_csc_off_zero, - ilk_csc_coeff_limited_range, - ilk_csc_postoff_limited_range); + drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); + + ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_limited_range); + } else { + drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0); + + intel_csc_clear(&crtc_state->output_csc); } } -static void chv_load_cgm_csc(struct intel_crtc *crtc, - const struct drm_property_blob *blob) +static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - const struct drm_color_ctm *ctm = blob->data; - enum pipe pipe = crtc->pipe; - u16 coeffs[9]; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc_state->csc_mode & ICL_CSC_ENABLE) + ilk_update_pipe_csc(crtc, &crtc_state->csc); + + if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) + icl_update_output_csc(crtc, &crtc_state->output_csc); +} + +static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits) +{ + s64 c = CTM_COEFF_ABS(coeff); + + /* leave an extra bit for 
rounding */ + c >>= 32 - frac_bits - 1; + + /* round and drop the extra bit */ + c = (c + 1) >> 1; + + if (CTM_COEFF_NEGATIVE(coeff)) + c = -c; + + c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1), + (s64)(BIT(int_bits + frac_bits - 1) - 1)); + + return c & (BIT(int_bits + frac_bits) - 1); +} + +/* + * VLV/CHV Wide Gamut Color Correction (WGC) CSC + * |r| | c0 c1 c2 | |r| + * |g| = | c3 c4 c5 | x |g| + * |b| | c6 c7 c8 | |b| + * + * Coefficients are two's complement s2.10. + */ +static void vlv_wgc_csc_convert_ctm(const struct intel_crtc_state *crtc_state, + struct intel_csc_matrix *csc) +{ + const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; int i; - for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i]; + for (i = 0; i < 9; i++) + csc->coeff[i] = ctm_to_twos_complement(ctm->matrix[i], 2, 10); +} + +static void vlv_load_wgc_csc(struct intel_crtc *crtc, + const struct intel_csc_matrix *csc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(pipe), + csc->coeff[1] << 16 | csc->coeff[0]); + intel_de_write_fw(dev_priv, PIPE_WGC_C02(pipe), + csc->coeff[2]); + + intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(pipe), + csc->coeff[4] << 16 | csc->coeff[3]); + intel_de_write_fw(dev_priv, PIPE_WGC_C12(pipe), + csc->coeff[5]); + + intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(pipe), + csc->coeff[7] << 16 | csc->coeff[6]); + intel_de_write_fw(dev_priv, PIPE_WGC_C22(pipe), + csc->coeff[8]); +} + +static void vlv_read_wgc_csc(struct intel_crtc *crtc, + struct intel_csc_matrix *csc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 tmp; + + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(pipe)); + csc->coeff[0] = tmp & 0xffff; + csc->coeff[1] = tmp >> 16; - /* Round coefficient. */ - abs_coeff += 1 << (32 - 13); - /* Clamp to hardware limits. */ - abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1); + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(pipe)); + csc->coeff[2] = tmp & 0xffff; - coeffs[i] = 0; + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(pipe)); + csc->coeff[3] = tmp & 0xffff; + csc->coeff[4] = tmp >> 16; - /* Write coefficients in S3.12 format. */ - if (ctm->matrix[i] & (1ULL << 63)) - coeffs[i] |= 1 << 15; + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(pipe)); + csc->coeff[5] = tmp & 0xffff; - coeffs[i] |= ((abs_coeff >> 32) & 7) << 12; - coeffs[i] |= (abs_coeff >> 20) & 0xfff; + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(pipe)); + csc->coeff[6] = tmp & 0xffff; + csc->coeff[7] = tmp >> 16; + + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(pipe)); + csc->coeff[8] = tmp & 0xffff; +} + +static void vlv_read_csc(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc_state->wgc_enable) + vlv_read_wgc_csc(crtc, &crtc_state->csc); +} + +static void vlv_assign_csc(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + if (crtc_state->hw.ctm) { + drm_WARN_ON(&i915->drm, !crtc_state->wgc_enable); + + vlv_wgc_csc_convert_ctm(crtc_state, &crtc_state->csc); + } else { + drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); + + intel_csc_clear(&crtc_state->csc); } +} + +/* + * CHV Color Gamut Mapping (CGM) CSC + * |r| | c0 c1 c2 | |r| + * |g| = | c3 c4 c5 | x |g| + * |b| | c6 c7 c8 | |b| + * + * Coefficients are two's complement s4.12. 
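+ *
+ * For example, ctm_to_twos_complement(coeff, 4, 12) above turns a DRM CTM
+ * coefficient of 1.0 (sign-magnitude value 0x100000000) into 0x1000,
+ * 0.5 into 0x0800 and -0.5 into 0xf800.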
+ */ +static void chv_cgm_csc_convert_ctm(const struct intel_crtc_state *crtc_state, + struct intel_csc_matrix *csc) +{ + const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; + int i; + + for (i = 0; i < 9; i++) + csc->coeff[i] = ctm_to_twos_complement(ctm->matrix[i], 4, 12); +} + +#define CHV_CGM_CSC_COEFF_1_0 (1 << 12) + +static const struct intel_csc_matrix chv_cgm_csc_matrix_identity = { + .coeff = { + CHV_CGM_CSC_COEFF_1_0, 0, 0, + 0, CHV_CGM_CSC_COEFF_1_0, 0, + 0, 0, CHV_CGM_CSC_COEFF_1_0, + }, +}; + +static void chv_load_cgm_csc(struct intel_crtc *crtc, + const struct intel_csc_matrix *csc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF01(pipe), - coeffs[1] << 16 | coeffs[0]); + csc->coeff[1] << 16 | csc->coeff[0]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF23(pipe), - coeffs[3] << 16 | coeffs[2]); + csc->coeff[3] << 16 | csc->coeff[2]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF45(pipe), - coeffs[5] << 16 | coeffs[4]); + csc->coeff[5] << 16 | csc->coeff[4]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF67(pipe), - coeffs[7] << 16 | coeffs[6]); + csc->coeff[7] << 16 | csc->coeff[6]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF8(pipe), - coeffs[8]); + csc->coeff[8]); +} + +static void chv_read_cgm_csc(struct intel_crtc *crtc, + struct intel_csc_matrix *csc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 tmp; + + tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF01(pipe)); + csc->coeff[0] = tmp & 0xffff; + csc->coeff[1] = tmp >> 16; + + tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF23(pipe)); + csc->coeff[2] = tmp & 0xffff; + csc->coeff[3] = tmp >> 16; + + tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF45(pipe)); + csc->coeff[4] = tmp & 0xffff; + csc->coeff[5] = tmp >> 16; + + tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF67(pipe)); + csc->coeff[6] = tmp & 0xffff; + csc->coeff[7] = tmp >> 16; + + tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF8(pipe)); + csc->coeff[8] = tmp & 0xffff; +} + +static void chv_read_csc(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) + chv_read_cgm_csc(crtc, &crtc_state->csc); +} + +static void chv_assign_csc(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); + + if (crtc_state->hw.ctm) { + drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); + + chv_cgm_csc_convert_ctm(crtc_state, &crtc_state->csc); + } else { + drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); + + crtc_state->csc = chv_cgm_csc_matrix_identity; + } } /* convert hw value with given bit_precision to lut property val */ @@ -1336,6 +1668,16 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) } } +static void vlv_load_luts(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc_state->wgc_enable) + vlv_load_wgc_csc(crtc, &crtc_state->csc); + + i965_load_luts(crtc_state); +} + static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) { return REG_FIELD_PREP(CGM_PIPE_DEGAMMA_GREEN_LDW_MASK, drm_color_lut_extract(color->green, 14)) | @@ -1410,10 +1752,9 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state) struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct 
drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; - const struct drm_property_blob *ctm = crtc_state->hw.ctm; if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) - chv_load_cgm_csc(crtc, ctm); + chv_load_cgm_csc(crtc, &crtc_state->csc); if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA) chv_load_cgm_degamma(crtc, pre_csc_lut); @@ -1491,6 +1832,18 @@ static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state !old_crtc_state->pre_csc_lut; } +static bool vlv_can_preload_luts(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->uapi.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + return !old_crtc_state->wgc_enable && + !old_crtc_state->post_csc_lut; +} + static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); @@ -1507,7 +1860,7 @@ static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state) if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode) return false; - return !old_crtc_state->post_csc_lut; + return vlv_can_preload_luts(new_crtc_state); } int intel_color_check(struct intel_crtc_state *crtc_state) @@ -1522,6 +1875,9 @@ void intel_color_get_config(struct intel_crtc_state *crtc_state) struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); i915->display.funcs.color->read_luts(crtc_state); + + if (i915->display.funcs.color->read_csc) + i915->display.funcs.color->read_csc(crtc_state); } bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, @@ -1606,14 +1962,14 @@ static u32 intel_gamma_lut_tests(const struct intel_crtc_state *crtc_state) if (lut_is_legacy(gamma_lut)) return 0; - return INTEL_INFO(i915)->display.color.gamma_lut_tests; + return DISPLAY_INFO(i915)->color.gamma_lut_tests; } static u32 intel_degamma_lut_tests(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - return INTEL_INFO(i915)->display.color.degamma_lut_tests; + return DISPLAY_INFO(i915)->color.degamma_lut_tests; } static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state) @@ -1624,14 +1980,14 @@ static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state) if (lut_is_legacy(gamma_lut)) return LEGACY_LUT_LENGTH; - return INTEL_INFO(i915)->display.color.gamma_lut_size; + return DISPLAY_INFO(i915)->color.gamma_lut_size; } static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - return INTEL_INFO(i915)->display.color.degamma_lut_size; + return DISPLAY_INFO(i915)->color.degamma_lut_size; } static int check_lut_size(const struct drm_property_blob *lut, int expected) @@ -1791,6 +2147,39 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state) return 0; } +/* + * VLV color pipeline: + * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10 + */ +static int vlv_color_check(struct intel_crtc_state *crtc_state) +{ + int ret; + + ret = check_luts(crtc_state); + if (ret) + return ret; + + crtc_state->gamma_enable = + crtc_state->hw.gamma_lut && + !crtc_state->c8_planes; + + crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state); + + crtc_state->wgc_enable = crtc_state->hw.ctm; + + ret = 
intel_color_add_affected_planes(crtc_state); + if (ret) + return ret; + + intel_assign_luts(crtc_state); + + vlv_assign_csc(crtc_state); + + crtc_state->preload_luts = vlv_can_preload_luts(crtc_state); + + return 0; +} + static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state) { u32 cgm_mode = 0; @@ -1803,6 +2192,13 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state) !lut_is_legacy(crtc_state->hw.gamma_lut)) cgm_mode |= CGM_PIPE_MODE_GAMMA; + /* + * Toggling the CGM CSC on/off outside of the tiny window + * between start of vblank and frame start causes underruns. + * Always enable the CGM CSC as a workaround. + */ + cgm_mode |= CGM_PIPE_MODE_CSC; + return cgm_mode; } @@ -1834,12 +2230,20 @@ static int chv_color_check(struct intel_crtc_state *crtc_state) crtc_state->cgm_mode = chv_cgm_mode(crtc_state); + /* + * We always bypass the WGC CSC and use the CGM CSC + * instead since it has degamma and better precision. + */ + crtc_state->wgc_enable = false; + ret = intel_color_add_affected_planes(crtc_state); if (ret) return ret; intel_assign_luts(crtc_state); + chv_assign_csc(crtc_state); + crtc_state->preload_luts = chv_can_preload_luts(crtc_state); return 0; @@ -1962,6 +2366,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + ilk_assign_csc(crtc_state); + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; @@ -2068,6 +2474,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + ilk_assign_csc(crtc_state); + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; @@ -2097,7 +2505,7 @@ static int glk_assign_luts(struct intel_crtc_state *crtc_state) struct drm_property_blob *gamma_lut; gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, - INTEL_INFO(i915)->display.color.degamma_lut_size, + DISPLAY_INFO(i915)->color.degamma_lut_size, false); if (IS_ERR(gamma_lut)) return PTR_ERR(gamma_lut); @@ -2199,6 +2607,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + ilk_assign_csc(crtc_state); + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; @@ -2261,6 +2671,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state) intel_assign_luts(crtc_state); + icl_assign_csc(crtc_state); + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; @@ -2627,7 +3039,7 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size; + u32 lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; @@ -2676,7 +3088,7 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size; + int i, lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; @@ -2726,7 +3138,7 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int i, lut_size 
= INTEL_INFO(dev_priv)->display.color.degamma_lut_size; + int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; @@ -2752,7 +3164,7 @@ static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc) static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); - int i, lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size; + int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; @@ -2816,7 +3228,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); - int i, lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size; + int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; @@ -3000,7 +3412,7 @@ static void bdw_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size; + int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; @@ -3065,7 +3477,7 @@ static struct drm_property_blob * icl_read_lut_multi_segment(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); - int i, lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size; + int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; @@ -3135,6 +3547,16 @@ static const struct intel_color_funcs chv_color_funcs = { .load_luts = chv_load_luts, .read_luts = chv_read_luts, .lut_equal = chv_lut_equal, + .read_csc = chv_read_csc, +}; + +static const struct intel_color_funcs vlv_color_funcs = { + .color_check = vlv_color_check, + .color_commit_arm = i9xx_color_commit_arm, + .load_luts = vlv_load_luts, + .read_luts = i965_read_luts, + .lut_equal = i965_lut_equal, + .read_csc = vlv_read_csc, }; static const struct intel_color_funcs i965_color_funcs = { @@ -3160,6 +3582,7 @@ static const struct intel_color_funcs tgl_color_funcs = { .load_luts = icl_load_luts, .read_luts = icl_read_luts, .lut_equal = icl_lut_equal, + .read_csc = icl_read_csc, }; static const struct intel_color_funcs icl_color_funcs = { @@ -3170,6 +3593,7 @@ static const struct intel_color_funcs icl_color_funcs = { .load_luts = icl_load_luts, .read_luts = icl_read_luts, .lut_equal = icl_lut_equal, + .read_csc = icl_read_csc, }; static const struct intel_color_funcs glk_color_funcs = { @@ -3179,6 +3603,7 @@ static const struct intel_color_funcs glk_color_funcs = { .load_luts = glk_load_luts, .read_luts = glk_read_luts, .lut_equal = glk_lut_equal, + .read_csc = skl_read_csc, }; static const struct intel_color_funcs skl_color_funcs = { @@ -3188,6 +3613,7 @@ static const struct intel_color_funcs skl_color_funcs = { .load_luts = bdw_load_luts, .read_luts = bdw_read_luts, .lut_equal = ivb_lut_equal, + .read_csc = skl_read_csc, }; static const struct intel_color_funcs bdw_color_funcs = { @@ -3197,6 +3623,7 @@ static const struct intel_color_funcs 
bdw_color_funcs = { .load_luts = bdw_load_luts, .read_luts = bdw_read_luts, .lut_equal = ivb_lut_equal, + .read_csc = ilk_read_csc, }; static const struct intel_color_funcs hsw_color_funcs = { @@ -3206,6 +3633,7 @@ static const struct intel_color_funcs hsw_color_funcs = { .load_luts = ivb_load_luts, .read_luts = ivb_read_luts, .lut_equal = ivb_lut_equal, + .read_csc = ilk_read_csc, }; static const struct intel_color_funcs ivb_color_funcs = { @@ -3215,6 +3643,7 @@ static const struct intel_color_funcs ivb_color_funcs = { .load_luts = ivb_load_luts, .read_luts = ivb_read_luts, .lut_equal = ivb_lut_equal, + .read_csc = ilk_read_csc, }; static const struct intel_color_funcs ilk_color_funcs = { @@ -3224,6 +3653,7 @@ static const struct intel_color_funcs ilk_color_funcs = { .load_luts = ilk_load_luts, .read_luts = ilk_read_luts, .lut_equal = ilk_lut_equal, + .read_csc = ilk_read_csc, }; void intel_color_crtc_init(struct intel_crtc *crtc) @@ -3234,9 +3664,9 @@ void intel_color_crtc_init(struct intel_crtc *crtc) drm_mode_crtc_set_gamma_size(&crtc->base, 256); - gamma_lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size; - degamma_lut_size = INTEL_INFO(i915)->display.color.degamma_lut_size; - has_ctm = degamma_lut_size != 0; + gamma_lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; + degamma_lut_size = DISPLAY_INFO(i915)->color.degamma_lut_size; + has_ctm = DISPLAY_VER(i915) >= 5; /* * "DPALETTE_A: NOTE: The 8-bit (non-10-bit) mode is the @@ -3260,7 +3690,8 @@ int intel_color_init(struct drm_i915_private *i915) if (DISPLAY_VER(i915) != 10) return 0; - blob = create_linear_lut(i915, INTEL_INFO(i915)->display.color.degamma_lut_size); + blob = create_linear_lut(i915, + DISPLAY_INFO(i915)->color.degamma_lut_size); if (IS_ERR(blob)) return PTR_ERR(blob); @@ -3274,6 +3705,8 @@ void intel_color_init_hooks(struct drm_i915_private *i915) if (HAS_GMCH(i915)) { if (IS_CHERRYVIEW(i915)) i915->display.funcs.color = &chv_color_funcs; + else if (IS_VALLEYVIEW(i915)) + i915->display.funcs.color = &vlv_color_funcs; else if (DISPLAY_VER(i915) >= 4) i915->display.funcs.color = &i965_color_funcs; else diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 257afac34839..00ea71b03ec7 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -176,15 +176,15 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector) /** * intel_connector_update_modes - update connector from edid * @connector: DRM connector device to use - * @edid: previously read EDID information + * @drm_edid: previously read EDID information */ int intel_connector_update_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { int ret; - drm_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); + drm_edid_connector_update(connector, drm_edid); + ret = drm_edid_connector_add_modes(connector); return ret; } @@ -199,15 +199,15 @@ int intel_connector_update_modes(struct drm_connector *connector, int intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct edid *edid; + const struct drm_edid *drm_edid; int ret; - edid = drm_get_edid(connector, adapter); - if (!edid) + drm_edid = drm_edid_read_ddc(connector, adapter); + if (!drm_edid) return 0; - ret = intel_connector_update_modes(connector, edid); - kfree(edid); + ret = intel_connector_update_modes(connector, drm_edid); + drm_edid_free(drm_edid); return 
ret; } diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h index 9d2bc261b204..aaf7281462dc 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.h +++ b/drivers/gpu/drm/i915/display/intel_connector.h @@ -9,7 +9,7 @@ #include <linux/types.h> struct drm_connector; -struct edid; +struct drm_edid; struct i2c_adapter; struct intel_connector; struct intel_encoder; @@ -25,7 +25,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector, bool intel_connector_get_hw_state(struct intel_connector *connector); enum pipe intel_connector_get_pipe(struct intel_connector *connector); int intel_connector_update_modes(struct drm_connector *connector, - struct edid *edid); + const struct drm_edid *drm_edid); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); void intel_attach_force_audio_property(struct drm_connector *connector); void intel_attach_broadcast_rgb_property(struct drm_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 38e9c61c2344..ab7cd5e60a0a 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -48,6 +48,8 @@ #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug.h" +#include "intel_hotplug_irq.h" +#include "intel_load_detect.h" #include "intel_pch_display.h" #include "intel_pch_refclk.h" @@ -394,6 +396,7 @@ static int intel_crt_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return 0; @@ -606,37 +609,38 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) return ret; } -static struct edid *intel_crt_get_edid(struct drm_connector *connector, - struct i2c_adapter *i2c) +static const struct drm_edid *intel_crt_get_edid(struct drm_connector *connector, + struct i2c_adapter *i2c) { - struct edid *edid; + const struct drm_edid *drm_edid; - edid = drm_get_edid(connector, i2c); + drm_edid = drm_edid_read_ddc(connector, i2c); - if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + if (!drm_edid && !intel_gmbus_is_forced_bit(i2c)) { drm_dbg_kms(connector->dev, "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); intel_gmbus_force_bit(i2c, true); - edid = drm_get_edid(connector, i2c); + drm_edid = drm_edid_read_ddc(connector, i2c); intel_gmbus_force_bit(i2c, false); } - return edid; + return drm_edid; } /* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */ static int intel_crt_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct edid *edid; + const struct drm_edid *drm_edid; int ret; - edid = intel_crt_get_edid(connector, adapter); - if (!edid) + drm_edid = intel_crt_get_edid(connector, adapter); + if (!drm_edid) return 0; - ret = intel_connector_update_modes(connector, edid); - kfree(edid); + ret = intel_connector_update_modes(connector, drm_edid); + + drm_edid_free(drm_edid); return ret; } @@ -645,14 +649,15 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); - struct edid *edid; + const struct drm_edid *drm_edid; struct i2c_adapter *i2c; bool ret = false; i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin); - edid = 
intel_crt_get_edid(connector, i2c); + drm_edid = intel_crt_get_edid(connector, i2c); - if (edid) { + if (drm_edid) { + const struct edid *edid = drm_edid_raw(drm_edid); bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; /* @@ -673,7 +678,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) "CRT not detected via DDC:0x50 [no valid EDID found]\n"); } - kfree(edid); + drm_edid_free(drm_edid); return ret; } @@ -821,9 +826,9 @@ intel_crt_detect(struct drm_connector *connector, struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; + struct drm_atomic_state *state; intel_wakeref_t wakeref; - int status, ret; - struct intel_load_detect_pipe tmp; + int status; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, @@ -881,8 +886,12 @@ load_detect: } /* for pre-945g platforms use load detect */ - ret = intel_get_load_detect_pipe(connector, &tmp, ctx); - if (ret > 0) { + state = intel_load_detect_get_pipe(connector, ctx); + if (IS_ERR(state)) { + status = PTR_ERR(state); + } else if (!state) { + status = connector_status_unknown; + } else { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else if (DISPLAY_VER(dev_priv) < 4) @@ -892,11 +901,7 @@ load_detect: status = connector_status_disconnected; else status = connector_status_unknown; - intel_release_load_detect_pipe(connector, &tmp, ctx); - } else if (ret == 0) { - status = connector_status_unknown; - } else { - status = ret; + intel_load_detect_release_pipe(connector, state, ctx); } out: diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index ed45a6934854..182c6dd64f47 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -11,7 +11,6 @@ #include <drm/drm_plane.h> #include <drm/drm_vblank_work.h> -#include "i915_irq.h" #include "i915_vgpu.h" #include "i9xx_plane.h" #include "icl_dsi.h" @@ -21,6 +20,7 @@ #include "intel_crtc.h" #include "intel_cursor.h" #include "intel_display_debugfs.h" +#include "intel_display_irq.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_drrs.h" @@ -35,7 +35,11 @@ static void assert_vblank_disabled(struct drm_crtc *crtc) { - if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) + struct drm_i915_private *i915 = to_i915(crtc->dev); + + if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0, + "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n", + crtc->base.id, crtc->name)) drm_crtc_vblank_put(crtc); } @@ -302,7 +306,7 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) return PTR_ERR(crtc); crtc->pipe = pipe; - crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe]; + crtc->num_scalers = DISPLAY_RUNTIME_INFO(dev_priv)->num_scalers[pipe]; if (DISPLAY_VER(dev_priv) >= 9) primary = skl_universal_plane_create(dev_priv, pipe, @@ -510,6 +514,13 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) VBLANK_EVASION_TIME_US); max = vblank_start - 1; + /* + * M/N is double buffered on the transcoder's undelayed vblank, + * so with seamless M/N we must evade both vblanks. 
+ */ + if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)) + min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay; + if (min <= 0 || max <= 0) goto irq_disable; @@ -692,7 +703,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) * FIXME Should be synchronized with the start of vblank somehow... */ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)) - intel_crtc_update_active_timings(new_crtc_state); + intel_crtc_update_active_timings(new_crtc_state, + new_crtc_state->vrr.enable); local_irq_enable(); diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h index 73077137fb99..51a4c8df9e65 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.h +++ b/drivers/gpu/drm/i915/display/intel_crtc.h @@ -16,6 +16,16 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +/* + * FIXME: We should instead only take spinlocks once for the entire update + * instead of once per mmio. + */ +#if IS_ENABLED(CONFIG_PROVE_LOCKING) +#define VBLANK_EVASION_TIME_US 250 +#else +#define VBLANK_EVASION_TIME_US 100 +#endif + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 54c8adc0702e..8d4640d0fd34 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -158,6 +158,45 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state) DRM_RECT_ARG(&plane_state->uapi.dst)); } +static void +ilk_dump_csc(struct drm_i915_private *i915, const char *name, + const struct intel_csc_matrix *csc) +{ + int i; + + drm_dbg_kms(&i915->drm, + "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name, + csc->preoff[0], csc->preoff[1], csc->preoff[2]); + + for (i = 0; i < 3; i++) + drm_dbg_kms(&i915->drm, + "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, + csc->coeff[3 * i + 0], + csc->coeff[3 * i + 1], + csc->coeff[3 * i + 2]); + + if (DISPLAY_VER(i915) < 7) + return; + + drm_dbg_kms(&i915->drm, + "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name, + csc->postoff[0], csc->postoff[1], csc->postoff[2]); +} + +static void +vlv_dump_csc(struct drm_i915_private *i915, const char *name, + const struct intel_csc_matrix *csc) +{ + int i; + + for (i = 0; i < 3; i++) + drm_dbg_kms(&i915->drm, + "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, + csc->coeff[3 * i + 0], + csc->coeff[3 * i + 1], + csc->coeff[3 * i + 2]); +} + void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, struct intel_atomic_state *state, const char *context) @@ -178,10 +217,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); drm_dbg_kms(&i915->drm, - "active: %s, output_types: %s (0x%x), output format: %s\n", + "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n", str_yes_no(pipe_config->hw.active), buf, pipe_config->output_types, - intel_output_format_name(pipe_config->output_format)); + intel_output_format_name(pipe_config->output_format), + intel_output_format_name(pipe_config->sink_format)); drm_dbg_kms(&i915->drm, "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", @@ -325,6 +365,16 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, 
pipe_config->post_csc_lut ? drm_color_lut_size(pipe_config->post_csc_lut) : 0); + if (DISPLAY_VER(i915) >= 11) + ilk_dump_csc(i915, "output csc", &pipe_config->output_csc); + + if (!HAS_GMCH(i915)) + ilk_dump_csc(i915, "pipe csc", &pipe_config->csc); + else if (IS_CHERRYVIEW(i915)) + vlv_dump_csc(i915, "cgm csc", &pipe_config->csc); + else if (IS_VALLEYVIEW(i915)) + vlv_dump_csc(i915, "wgc csc", &pipe_config->csc); + dump_planes: if (!state) return; diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 31bef0427377..b342fad180ca 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -36,7 +36,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) const struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 base; - if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) + if (DISPLAY_INFO(dev_priv)->cursor_needs_physical) base = sg_dma_address(obj->mm.pages->sgl); else base = intel_plane_ggtt_offset(plane_state); @@ -814,7 +814,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180); - zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; + zpos = DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; drm_plane_create_zpos_immutable_property(&cursor->base, zpos); if (DISPLAY_VER(dev_priv) >= 12) diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c new file mode 100644 index 000000000000..0600fdcd06ef --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -0,0 +1,3046 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <linux/log2.h> +#include <linux/math64.h> +#include "i915_reg.h" +#include "intel_cx0_phy.h" +#include "intel_cx0_phy_regs.h" +#include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_hdmi.h" +#include "intel_panel.h" +#include "intel_psr.h" +#include "intel_tc.h" + +#define MB_WRITE_COMMITTED true +#define MB_WRITE_UNCOMMITTED false + +#define for_each_cx0_lane_in_mask(__lane_mask, __lane) \ + for ((__lane) = 0; (__lane) < 2; (__lane)++) \ + for_each_if((__lane_mask) & BIT(__lane)) + +#define INTEL_CX0_LANE0 BIT(0) +#define INTEL_CX0_LANE1 BIT(1) +#define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0) + +bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy) +{ + if (IS_METEORLAKE(i915) && (phy < PHY_C)) + return true; + + return false; +} + +static int lane_mask_to_lane(u8 lane_mask) +{ + if (WARN_ON((lane_mask & ~INTEL_CX0_BOTH_LANES) || + hweight8(lane_mask) != 1)) + return 0; + + return ilog2(lane_mask); +} + +static void +assert_dc_off(struct drm_i915_private *i915) +{ + bool enabled; + + enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF); + drm_WARN_ON(&i915->drm, !enabled); +} + +/* + * Prepare HW for CX0 phy transactions. + * + * It is required that PSR and DC5/6 are disabled before any CX0 message + * bus transaction is executed. 
+ */ +static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + intel_psr_pause(intel_dp); + return intel_display_power_get(i915, POWER_DOMAIN_DC_OFF); +} + +static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + intel_psr_resume(intel_dp); + intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref); +} + +static void intel_clear_response_ready_flag(struct drm_i915_private *i915, + enum port port, int lane) +{ + intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), + 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET); +} + +static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane) +{ + enum phy phy = intel_port_to_phy(i915, port); + + intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_RESET); + + if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_RESET, + XELPDP_MSGBUS_TIMEOUT_SLOW)) { + drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy)); + return; + } + + intel_clear_response_ready_flag(i915, port, lane); +} + +static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port, + int command, int lane, u32 *val) +{ + enum phy phy = intel_port_to_phy(i915, port); + + if (__intel_de_wait_for_register(i915, + XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), + XELPDP_PORT_P2M_RESPONSE_READY, + XELPDP_PORT_P2M_RESPONSE_READY, + XELPDP_MSGBUS_TIMEOUT_FAST_US, + XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { + drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", + phy_name(phy), *val); + return -ETIMEDOUT; + } + + if (*val & XELPDP_PORT_P2M_ERROR_SET) { + drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy), + command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); + intel_cx0_bus_reset(i915, port, lane); + return -EINVAL; + } + + if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) { + drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy), + command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); + intel_cx0_bus_reset(i915, port, lane); + return -EINVAL; + } + + return 0; +} + +static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port, + int lane, u16 addr) +{ + enum phy phy = intel_port_to_phy(i915, port); + int ack; + u32 val; + + if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_SLOW)) { + drm_dbg_kms(&i915->drm, + "PHY %c Timeout waiting for previous transaction to complete. 
Reset the bus and retry.\n", phy_name(phy)); + intel_cx0_bus_reset(i915, port, lane); + return -ETIMEDOUT; + } + + intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING | + XELPDP_PORT_M2P_COMMAND_READ | + XELPDP_PORT_M2P_ADDRESS(addr)); + + ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val); + if (ack < 0) { + intel_cx0_bus_reset(i915, port, lane); + return ack; + } + + intel_clear_response_ready_flag(i915, port, lane); + + return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); +} + +static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port, + int lane, u16 addr) +{ + enum phy phy = intel_port_to_phy(i915, port); + int i, status; + + assert_dc_off(i915); + + /* 3 tries is assumed to be enough to read successfully */ + for (i = 0; i < 3; i++) { + status = __intel_cx0_read_once(i915, port, lane, addr); + + if (status >= 0) + return status; + } + + drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n", + phy_name(phy), addr, i); + + return 0; +} + +static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port, + u8 lane_mask, u16 addr) +{ + int lane = lane_mask_to_lane(lane_mask); + + return __intel_cx0_read(i915, port, lane, addr); +} + +static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, + int lane, u16 addr, u8 data, bool committed) +{ + enum phy phy = intel_port_to_phy(i915, port); + u32 val; + + if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_SLOW)) { + drm_dbg_kms(&i915->drm, + "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy)); + intel_cx0_bus_reset(i915, port, lane); + return -ETIMEDOUT; + } + + intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING | + (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED : + XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) | + XELPDP_PORT_M2P_DATA(data) | + XELPDP_PORT_M2P_ADDRESS(addr)); + + if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_SLOW)) { + drm_dbg_kms(&i915->drm, + "PHY %c Timeout waiting for write to complete. 
Resetting the bus.\n", phy_name(phy)); + intel_cx0_bus_reset(i915, port, lane); + return -ETIMEDOUT; + } + + if (committed) { + if (intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val) < 0) { + intel_cx0_bus_reset(i915, port, lane); + return -EINVAL; + } + } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) & + XELPDP_PORT_P2M_ERROR_SET)) { + drm_dbg_kms(&i915->drm, + "PHY %c Error occurred during write command.\n", phy_name(phy)); + intel_cx0_bus_reset(i915, port, lane); + return -EINVAL; + } + + intel_clear_response_ready_flag(i915, port, lane); + + return 0; +} + +static void __intel_cx0_write(struct drm_i915_private *i915, enum port port, + int lane, u16 addr, u8 data, bool committed) +{ + enum phy phy = intel_port_to_phy(i915, port); + int i, status; + + assert_dc_off(i915); + + /* 3 tries is assumed to be enough to write successfully */ + for (i = 0; i < 3; i++) { + status = __intel_cx0_write_once(i915, port, lane, addr, data, committed); + + if (status == 0) + return; + } + + drm_err_once(&i915->drm, + "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i); +} + +static void intel_cx0_write(struct drm_i915_private *i915, enum port port, + u8 lane_mask, u16 addr, u8 data, bool committed) +{ + int lane; + + for_each_cx0_lane_in_mask(lane_mask, lane) + __intel_cx0_write(i915, port, lane, addr, data, committed); +} + +static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port, + int lane, u16 addr, u16 data) +{ + assert_dc_off(i915); + + intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0); + intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0); + + intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, 0); + intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1); +} + +static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port, + int lane, u16 addr) +{ + u16 val; + + assert_dc_off(i915); + + intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0); + intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1); + + val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H); + val <<= 8; + val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L); + + return val; +} + +static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port, + int lane, u16 addr, u8 clear, u8 set, bool committed) +{ + u8 old, val; + + old = __intel_cx0_read(i915, port, lane, addr); + val = (old & ~clear) | set; + + if (val != old) + __intel_cx0_write(i915, port, lane, addr, val, committed); +} + +static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port, + u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed) +{ + u8 lane; + + for_each_cx0_lane_in_mask(lane_mask, lane) + __intel_cx0_rmw(i915, port, lane, addr, clear, set, committed); +} + +static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state) +{ + if (intel_crtc_has_dp_encoder(crtc_state)) { + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + (crtc_state->port_clock == 540000 || + crtc_state->port_clock == 810000)) + return 5; + else + return 4; + } else { + return 5; + } +} + +static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state) +{ + if (intel_crtc_has_dp_encoder(crtc_state)) { + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + (crtc_state->port_clock == 540000 || + crtc_state->port_clock == 810000)) + return 5; + else + return 2; + } else { + return 6; + } +} + +void 
intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_ddi_buf_trans *trans; + enum phy phy = intel_port_to_phy(i915, encoder->port); + intel_wakeref_t wakeref; + int n_entries, ln; + + wakeref = intel_cx0_phy_transaction_begin(encoder); + + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&i915->drm, !trans)) { + intel_cx0_phy_transaction_end(encoder, wakeref); + return; + } + + if (intel_is_c10phy(i915, phy)) { + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), + 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CMN(3), + C10_CMN3_TXVBOOST_MASK, + C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)), + MB_WRITE_UNCOMMITTED); + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_TX(1), + C10_TX1_TERMCTL_MASK, + C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)), + MB_WRITE_COMMITTED); + } + + for (ln = 0; ln < crtc_state->lane_count; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + int lane, tx; + + lane = ln / 2; + tx = ln % 2; + + intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 0), + C10_PHY_OVRD_LEVEL_MASK, + C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor), + MB_WRITE_COMMITTED); + intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 1), + C10_PHY_OVRD_LEVEL_MASK, + C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing), + MB_WRITE_COMMITTED); + intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 2), + C10_PHY_OVRD_LEVEL_MASK, + C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor), + MB_WRITE_COMMITTED); + } + + /* Write Override enables in 0xD71 */ + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_OVRD, + 0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2, + MB_WRITE_COMMITTED); + + if (intel_is_c10phy(i915, phy)) + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), + 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); + + intel_cx0_phy_transaction_end(encoder, wakeref); +} + +/* + * Basic DP link rates with 38.4 MHz reference clock. + * Note: The tables below are with SSC. In non-ssc + * registers 0xC04 to 0xC08(pll[4] to pll[8]) will be + * programmed 0. 
+ */ + +static const struct intel_c10pll_state mtl_c10_dp_rbr = { + .clock = 162000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0xB4, + .pll[1] = 0, + .pll[2] = 0x30, + .pll[3] = 0x1, + .pll[4] = 0x26, + .pll[5] = 0x0C, + .pll[6] = 0x98, + .pll[7] = 0x46, + .pll[8] = 0x1, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0xC0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0x2, + .pll[16] = 0x84, + .pll[17] = 0x4F, + .pll[18] = 0xE5, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_edp_r216 = { + .clock = 216000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0x4, + .pll[1] = 0, + .pll[2] = 0xA2, + .pll[3] = 0x1, + .pll[4] = 0x33, + .pll[5] = 0x10, + .pll[6] = 0x75, + .pll[7] = 0xB3, + .pll[8] = 0x1, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0x2, + .pll[16] = 0x85, + .pll[17] = 0x0F, + .pll[18] = 0xE6, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_edp_r243 = { + .clock = 243000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0x34, + .pll[1] = 0, + .pll[2] = 0xDA, + .pll[3] = 0x1, + .pll[4] = 0x39, + .pll[5] = 0x12, + .pll[6] = 0xE3, + .pll[7] = 0xE9, + .pll[8] = 0x1, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0x20, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0x2, + .pll[16] = 0x85, + .pll[17] = 0x8F, + .pll[18] = 0xE6, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_dp_hbr1 = { + .clock = 270000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0xF4, + .pll[1] = 0, + .pll[2] = 0xF8, + .pll[3] = 0x0, + .pll[4] = 0x20, + .pll[5] = 0x0A, + .pll[6] = 0x29, + .pll[7] = 0x10, + .pll[8] = 0x1, /* Verify */ + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0xA0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0x1, + .pll[16] = 0x84, + .pll[17] = 0x4F, + .pll[18] = 0xE5, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_edp_r324 = { + .clock = 324000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0xB4, + .pll[1] = 0, + .pll[2] = 0x30, + .pll[3] = 0x1, + .pll[4] = 0x26, + .pll[5] = 0x0C, + .pll[6] = 0x98, + .pll[7] = 0x46, + .pll[8] = 0x1, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0xC0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0x1, + .pll[16] = 0x85, + .pll[17] = 0x4F, + .pll[18] = 0xE6, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_edp_r432 = { + .clock = 432000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0x4, + .pll[1] = 0, + .pll[2] = 0xA2, + .pll[3] = 0x1, + .pll[4] = 0x33, + .pll[5] = 0x10, + .pll[6] = 0x75, + .pll[7] = 0xB3, + .pll[8] = 0x1, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0x1, + .pll[16] = 0x85, + .pll[17] = 0x0F, + .pll[18] = 0xE6, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_dp_hbr2 = { + .clock = 540000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0xF4, + .pll[1] = 0, + .pll[2] = 0xF8, + .pll[3] = 0, + .pll[4] = 0x20, + .pll[5] = 0x0A, + .pll[6] = 0x29, + .pll[7] = 0x10, + .pll[8] = 0x1, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0xA0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0, + .pll[16] = 0x84, + .pll[17] = 0x4F, + .pll[18] = 0xE5, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_edp_r675 = { + .clock = 675000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0xB4, + .pll[1] = 0, + .pll[2] = 0x3E, + .pll[3] = 0x1, + .pll[4] = 0xA8, + .pll[5] = 0x0C, + .pll[6] = 0x33, + .pll[7] = 0x54, + .pll[8] = 0x1, + .pll[9] = 0x1, + 
.pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0xC8, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0, + .pll[16] = 0x85, + .pll[17] = 0x8F, + .pll[18] = 0xE6, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_dp_hbr3 = { + .clock = 810000, + .tx = 0x10, + .cmn = 0x21, + .pll[0] = 0x34, + .pll[1] = 0, + .pll[2] = 0x84, + .pll[3] = 0x1, + .pll[4] = 0x30, + .pll[5] = 0x0F, + .pll[6] = 0x3D, + .pll[7] = 0x98, + .pll[8] = 0x1, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0xF0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0, + .pll[16] = 0x84, + .pll[17] = 0x0F, + .pll[18] = 0xE5, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state * const mtl_c10_dp_tables[] = { + &mtl_c10_dp_rbr, + &mtl_c10_dp_hbr1, + &mtl_c10_dp_hbr2, + &mtl_c10_dp_hbr3, + NULL, +}; + +static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = { + &mtl_c10_dp_rbr, + &mtl_c10_edp_r216, + &mtl_c10_edp_r243, + &mtl_c10_dp_hbr1, + &mtl_c10_edp_r324, + &mtl_c10_edp_r432, + &mtl_c10_dp_hbr2, + &mtl_c10_edp_r675, + &mtl_c10_dp_hbr3, + NULL, +}; + +/* C20 basic DP 1.4 tables */ +static const struct intel_c20pll_state mtl_c20_dp_rbr = { + .link_bit_rate = 162000, + .clock = 162000, + .tx = { 0xbe88, /* tx cfg0 */ + 0x5800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = {0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x50a8, /* mpllb cfg0 */ + 0x2120, /* mpllb cfg1 */ + 0xcd9a, /* mpllb cfg2 */ + 0xbfc1, /* mpllb cfg3 */ + 0x5ab8, /* mpllb cfg4 */ + 0x4c34, /* mpllb cfg5 */ + 0x2000, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x6000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0000, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_dp_hbr1 = { + .link_bit_rate = 270000, + .clock = 270000, + .tx = { 0xbe88, /* tx cfg0 */ + 0x4800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = {0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x308c, /* mpllb cfg0 */ + 0x2110, /* mpllb cfg1 */ + 0xcc9c, /* mpllb cfg2 */ + 0xbfc1, /* mpllb cfg3 */ + 0x4b9a, /* mpllb cfg4 */ + 0x3f81, /* mpllb cfg5 */ + 0x2000, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x5000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0000, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_dp_hbr2 = { + .link_bit_rate = 540000, + .clock = 540000, + .tx = { 0xbe88, /* tx cfg0 */ + 0x4800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = {0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x108c, /* mpllb cfg0 */ + 0x2108, /* mpllb cfg1 */ + 0xcc9c, /* mpllb cfg2 */ + 0xbfc1, /* mpllb cfg3 */ + 0x4b9a, /* mpllb cfg4 */ + 0x3f81, /* mpllb cfg5 */ + 0x2000, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x5000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0000, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_dp_hbr3 = { + .link_bit_rate = 810000, + .clock = 810000, + .tx = { 0xbe88, /* tx cfg0 */ + 0x4800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = {0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x10d2, /* mpllb cfg0 */ + 0x2108, /* mpllb cfg1 */ + 0x8d98, /* mpllb cfg2 */ + 0xbfc1, /* mpllb cfg3 */ + 0x7166, /* mpllb cfg4 */ + 0x5f42, /* mpllb cfg5 */ + 0x2000, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x7800, /* mpllb cfg8 */ + 0x0000, /* mpllb 
cfg9 */ + 0x0000, /* mpllb cfg10 */ + }, +}; + +/* C20 basic DP 2.0 tables */ +static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = { + .link_bit_rate = 1000000, /* 10 Gbps */ + .clock = 312500, + .tx = { 0xbe21, /* tx cfg0 */ + 0x4800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = {0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mplla = { 0x3104, /* mplla cfg0 */ + 0xd105, /* mplla cfg1 */ + 0xc025, /* mplla cfg2 */ + 0xc025, /* mplla cfg3 */ + 0x8c00, /* mplla cfg4 */ + 0x759a, /* mplla cfg5 */ + 0x4000, /* mplla cfg6 */ + 0x0003, /* mplla cfg7 */ + 0x3555, /* mplla cfg8 */ + 0x0001, /* mplla cfg9 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = { + .link_bit_rate = 1350000, /* 13.5 Gbps */ + .clock = 421875, + .tx = { 0xbea0, /* tx cfg0 */ + 0x4800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = {0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x015f, /* mpllb cfg0 */ + 0x2205, /* mpllb cfg1 */ + 0x1b17, /* mpllb cfg2 */ + 0xffc1, /* mpllb cfg3 */ + 0xe100, /* mpllb cfg4 */ + 0xbd00, /* mpllb cfg5 */ + 0x2000, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x4800, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0000, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = { + .link_bit_rate = 2000000, /* 20 Gbps */ + .clock = 625000, + .tx = { 0xbe20, /* tx cfg0 */ + 0x4800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = {0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mplla = { 0x3104, /* mplla cfg0 */ + 0xd105, /* mplla cfg1 */ + 0xc025, /* mplla cfg2 */ + 0xc025, /* mplla cfg3 */ + 0xa6ab, /* mplla cfg4 */ + 0x8c00, /* mplla cfg5 */ + 0x4000, /* mplla cfg6 */ + 0x0003, /* mplla cfg7 */ + 0x3555, /* mplla cfg8 */ + 0x0001, /* mplla cfg9 */ + }, +}; + +static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = { + &mtl_c20_dp_rbr, + &mtl_c20_dp_hbr1, + &mtl_c20_dp_hbr2, + &mtl_c20_dp_hbr3, + &mtl_c20_dp_uhbr10, + &mtl_c20_dp_uhbr13_5, + &mtl_c20_dp_uhbr20, + NULL, +}; + +/* + * HDMI link rates with 38.4 MHz reference clock. 
+ */ + +static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = { + .clock = 25200, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x4, + .pll[1] = 0, + .pll[2] = 0xB2, + .pll[3] = 0, + .pll[4] = 0, + .pll[5] = 0, + .pll[6] = 0, + .pll[7] = 0, + .pll[8] = 0x20, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0xD, + .pll[16] = 0x6, + .pll[17] = 0x8F, + .pll[18] = 0x84, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = { + .clock = 27000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, + .pll[1] = 0, + .pll[2] = 0xC0, + .pll[3] = 0, + .pll[4] = 0, + .pll[5] = 0, + .pll[6] = 0, + .pll[7] = 0, + .pll[8] = 0x20, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0x80, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0xD, + .pll[16] = 0x6, + .pll[17] = 0xCF, + .pll[18] = 0x84, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = { + .clock = 74250, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, + .pll[1] = 0, + .pll[2] = 0x7A, + .pll[3] = 0, + .pll[4] = 0, + .pll[5] = 0, + .pll[6] = 0, + .pll[7] = 0, + .pll[8] = 0x20, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0x58, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0xB, + .pll[16] = 0x6, + .pll[17] = 0xF, + .pll[18] = 0x85, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = { + .clock = 148500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, + .pll[1] = 0, + .pll[2] = 0x7A, + .pll[3] = 0, + .pll[4] = 0, + .pll[5] = 0, + .pll[6] = 0, + .pll[7] = 0, + .pll[8] = 0x20, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0x58, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0xA, + .pll[16] = 0x6, + .pll[17] = 0xF, + .pll[18] = 0x85, + .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_594 = { + .clock = 594000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, + .pll[1] = 0, + .pll[2] = 0x7A, + .pll[3] = 0, + .pll[4] = 0, + .pll[5] = 0, + .pll[6] = 0, + .pll[7] = 0, + .pll[8] = 0x20, + .pll[9] = 0x1, + .pll[10] = 0, + .pll[11] = 0, + .pll[12] = 0x58, + .pll[13] = 0, + .pll[14] = 0, + .pll[15] = 0x8, + .pll[16] = 0x6, + .pll[17] = 0xF, + .pll[18] = 0x85, + .pll[19] = 0x23, +}; + +/* Precomputed C10 HDMI PLL tables */ +static const struct intel_c10pll_state mtl_c10_hdmi_27027 = { + .clock = 27027, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0xCC, .pll[12] = 0x9C, .pll[13] = 0xCB, .pll[14] = 0xCC, + .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_28320 = { + .clock = 28320, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xCC, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_30240 = { + .clock = 30240, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xDC, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] 
= 0x00, + .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_31500 = { + .clock = 31500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x62, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xA0, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0C, .pll[16] = 0x09, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_36000 = { + .clock = 36000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xC4, .pll[1] = 0x00, .pll[2] = 0x76, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_40000 = { + .clock = 40000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x55, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_49500 = { + .clock = 49500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_50000 = { + .clock = 50000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xB0, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x2A, .pll[13] = 0xA9, .pll[14] = 0xAA, + .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_57284 = { + .clock = 57284, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xCE, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x77, .pll[12] = 0x57, .pll[13] = 0x77, .pll[14] = 0x77, + .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_58000 = { + .clock = 58000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xD5, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_65000 = { + .clock = 65000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x66, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + 
.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xB5, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_71000 = { + .clock = 71000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x72, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_74176 = { + .clock = 74176, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_75000 = { + .clock = 75000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7C, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_78750 = { + .clock = 78750, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x84, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x08, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_85500 = { + .clock = 85500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x92, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x10, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_88750 = { + .clock = 88750, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0x98, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x72, .pll[13] = 0xA9, .pll[14] = 0xAA, + .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_106500 = { + .clock = 106500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBC, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xF0, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_108000 = { + .clock = 108000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] 
= 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x80, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_115500 = { + .clock = 115500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_119000 = { + .clock = 119000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD6, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_135000 = { + .clock = 135000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6C, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0A, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_138500 = { + .clock = 138500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x70, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x22, .pll[13] = 0xA9, .pll[14] = 0xAA, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_147160 = { + .clock = 147160, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x78, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xA5, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_148352 = { + .clock = 148352, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_154000 = { + .clock = 154000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x80, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x35, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_162000 = { + .clock = 162000, + .tx = 0x10, + .cmn = 0x1, + 
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x88, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x60, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_167000 = { + .clock = 167000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x8C, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0xFA, .pll[13] = 0xA9, .pll[14] = 0xAA, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_197802 = { + .clock = 197802, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x99, .pll[12] = 0x05, .pll[13] = 0x98, .pll[14] = 0x99, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_198000 = { + .clock = 198000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_209800 = { + .clock = 209800, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBA, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x45, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_241500 = { + .clock = 241500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xDA, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xC8, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_262750 = { + .clock = 262750, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x68, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x6C, .pll[13] = 0xA9, .pll[14] = 0xAA, + .pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_268500 = { + .clock = 268500, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6A, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xEC, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct 
intel_c10pll_state mtl_c10_hdmi_296703 = { + .clock = 296703, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x33, .pll[12] = 0x44, .pll[13] = 0x33, .pll[14] = 0x33, + .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_297000 = { + .clock = 297000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x58, .pll[13] = 0x00, .pll[14] = 0x00, + .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_319750 = { + .clock = 319750, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x44, .pll[13] = 0xA9, .pll[14] = 0xAA, + .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_497750 = { + .clock = 497750, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xE2, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x9F, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_592000 = { + .clock = 592000, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x15, .pll[13] = 0x55, .pll[14] = 0x55, + .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state mtl_c10_hdmi_593407 = { + .clock = 593407, + .tx = 0x10, + .cmn = 0x1, + .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, + .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, + .pll[10] = 0xFF, .pll[11] = 0x3B, .pll[12] = 0x44, .pll[13] = 0xBA, .pll[14] = 0xBB, + .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, +}; + +static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = { + &mtl_c10_hdmi_25_2, /* Consolidated Table */ + &mtl_c10_hdmi_27_0, /* Consolidated Table */ + &mtl_c10_hdmi_27027, + &mtl_c10_hdmi_28320, + &mtl_c10_hdmi_30240, + &mtl_c10_hdmi_31500, + &mtl_c10_hdmi_36000, + &mtl_c10_hdmi_40000, + &mtl_c10_hdmi_49500, + &mtl_c10_hdmi_50000, + &mtl_c10_hdmi_57284, + &mtl_c10_hdmi_58000, + &mtl_c10_hdmi_65000, + &mtl_c10_hdmi_71000, + &mtl_c10_hdmi_74176, + &mtl_c10_hdmi_74_25, /* Consolidated Table */ + &mtl_c10_hdmi_75000, + &mtl_c10_hdmi_78750, + &mtl_c10_hdmi_85500, + &mtl_c10_hdmi_88750, + &mtl_c10_hdmi_106500, + &mtl_c10_hdmi_108000, + &mtl_c10_hdmi_115500, + &mtl_c10_hdmi_119000, + &mtl_c10_hdmi_135000, + &mtl_c10_hdmi_138500, + &mtl_c10_hdmi_147160, + &mtl_c10_hdmi_148352, + &mtl_c10_hdmi_148_5, /* Consolidated Table 
*/ + &mtl_c10_hdmi_154000, + &mtl_c10_hdmi_162000, + &mtl_c10_hdmi_167000, + &mtl_c10_hdmi_197802, + &mtl_c10_hdmi_198000, + &mtl_c10_hdmi_209800, + &mtl_c10_hdmi_241500, + &mtl_c10_hdmi_262750, + &mtl_c10_hdmi_268500, + &mtl_c10_hdmi_296703, + &mtl_c10_hdmi_297000, + &mtl_c10_hdmi_319750, + &mtl_c10_hdmi_497750, + &mtl_c10_hdmi_592000, + &mtl_c10_hdmi_593407, + &mtl_c10_hdmi_594, /* Consolidated Table */ + NULL, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = { + .link_bit_rate = 25175, + .clock = 25175, + .tx = { 0xbe88, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0xa0d2, /* mpllb cfg0 */ + 0x7d80, /* mpllb cfg1 */ + 0x0906, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x0200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x0000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0001, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = { + .link_bit_rate = 27000, + .clock = 27000, + .tx = { 0xbe88, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0xa0e0, /* mpllb cfg0 */ + 0x7d80, /* mpllb cfg1 */ + 0x0906, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x8000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0001, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = { + .link_bit_rate = 74250, + .clock = 74250, + .tx = { 0xbe88, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x609a, /* mpllb cfg0 */ + 0x7d40, /* mpllb cfg1 */ + 0xca06, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x5800, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0001, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = { + .link_bit_rate = 148500, + .clock = 148500, + .tx = { 0xbe88, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x409a, /* mpllb cfg0 */ + 0x7d20, /* mpllb cfg1 */ + 0xca06, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x5800, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0001, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_594 = { + .link_bit_rate = 594000, + .clock = 594000, + .tx = { 0xbe88, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x009a, /* mpllb cfg0 */ + 0x7d08, /* mpllb cfg1 */ + 0xca06, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x5800, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0001, /* mpllb cfg10 */ + }, +}; + 
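Aside: the consolidated C20 HDMI TMDS tables above only cover a handful of common clocks; for other TMDS clocks between 25.175 and 600 MHz, intel_c20pll_calc_state() further down in this diff first calls intel_c20_compute_hdmi_tmds_pll(), which derives the MPLLB multiplier and fractional terms directly from the pixel clock. The standalone user-space sketch below mirrors that arithmetic so the table entries can be cross-checked; the sketch_* helper and SKETCH_* constants are local stand-ins for the kernel's ilog2() and the REFCLK_38_4_MHZ / CLOCK_4999MHZ / CLOCK_9999MHZ defines from intel_cx0_phy_regs.h, and are not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_REFCLK_38_4_MHZ  38400000ULL
#define SKETCH_CLOCK_4999MHZ    4999999999ULL
#define SKETCH_CLOCK_9999MHZ    9999999999ULL

/* integer log2 of a non-zero value, matching the kernel's ilog2() */
static unsigned int sketch_ilog2(uint64_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint64_t pixel_clock = 148500;                  /* kHz, e.g. 1080p60 TMDS clock */
        uint64_t datarate = pixel_clock * 1000 * 10;    /* 10 TMDS bits per pixel clock */
        uint64_t tx_clk_div = sketch_ilog2(SKETCH_CLOCK_9999MHZ / datarate);
        uint64_t vco_shift = sketch_ilog2(SKETCH_CLOCK_4999MHZ * 256 / datarate);
        uint64_t vco_freq = (datarate << vco_shift) >> 8;
        uint64_t multiplier = (vco_freq << 28) / (SKETCH_REFCLK_38_4_MHZ >> 4);

        printf("tx_clk_div=%llu vco=%llu Hz\n",
               (unsigned long long)tx_clk_div, (unsigned long long)vco_freq);
        printf("mpll_multiplier=%llu fracn_quot=0x%04llx fracn_rem=0x%04llx\n",
               (unsigned long long)(2 * (multiplier >> 32)),
               (unsigned long long)((multiplier >> 16) & 0xFFFF),
               (unsigned long long)(multiplier & 0xFFFF));
        return 0;
}

With pixel_clock = 148500 kHz this prints tx_clk_div=2, vco=2970000000 Hz, mpll_multiplier=154 (0x9a), fracn_quot=0x5800 and fracn_rem=0x0000, which matches the MPLLB multiplier and fractional fields of the mtl_c20_hdmi_148_5 entry above.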
+static const struct intel_c20pll_state mtl_c20_hdmi_300 = { + .link_bit_rate = 3000000, + .clock = 166670, + .tx = { 0xbe98, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x209c, /* mpllb cfg0 */ + 0x7d10, /* mpllb cfg1 */ + 0xca06, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x2000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0004, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_600 = { + .link_bit_rate = 6000000, + .clock = 333330, + .tx = { 0xbe98, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x009c, /* mpllb cfg0 */ + 0x7d08, /* mpllb cfg1 */ + 0xca06, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x2000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0004, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_800 = { + .link_bit_rate = 8000000, + .clock = 444440, + .tx = { 0xbe98, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x00d0, /* mpllb cfg0 */ + 0x7d08, /* mpllb cfg1 */ + 0x4a06, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0003, /* mpllb cfg7 */ + 0x2aaa, /* mpllb cfg8 */ + 0x0002, /* mpllb cfg9 */ + 0x0004, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { + .link_bit_rate = 10000000, + .clock = 555560, + .tx = { 0xbe98, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x1104, /* mpllb cfg0 */ + 0x7d08, /* mpllb cfg1 */ + 0x0a06, /* mpllb cfg2 */ + 0xbe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0003, /* mpllb cfg7 */ + 0x3555, /* mpllb cfg8 */ + 0x0001, /* mpllb cfg9 */ + 0x0004, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state mtl_c20_hdmi_1200 = { + .link_bit_rate = 12000000, + .clock = 666670, + .tx = { 0xbe98, /* tx cfg0 */ + 0x9800, /* tx cfg1 */ + 0x0000, /* tx cfg2 */ + }, + .cmn = { 0x0500, /* cmn cfg0*/ + 0x0005, /* cmn cfg1 */ + 0x0000, /* cmn cfg2 */ + 0x0000, /* cmn cfg3 */ + }, + .mpllb = { 0x0138, /* mpllb cfg0 */ + 0x7d08, /* mpllb cfg1 */ + 0x5486, /* mpllb cfg2 */ + 0xfe40, /* mpllb cfg3 */ + 0x0000, /* mpllb cfg4 */ + 0x0000, /* mpllb cfg5 */ + 0x2200, /* mpllb cfg6 */ + 0x0001, /* mpllb cfg7 */ + 0x4000, /* mpllb cfg8 */ + 0x0000, /* mpllb cfg9 */ + 0x0004, /* mpllb cfg10 */ + }, +}; + +static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = { + &mtl_c20_hdmi_25_175, + &mtl_c20_hdmi_27_0, + &mtl_c20_hdmi_74_25, + &mtl_c20_hdmi_148_5, + &mtl_c20_hdmi_594, + &mtl_c20_hdmi_300, + &mtl_c20_hdmi_600, + &mtl_c20_hdmi_800, + &mtl_c20_hdmi_1000, + &mtl_c20_hdmi_1200, + NULL, +}; + +static int intel_c10_phy_check_hdmi_link_rate(int clock) +{ + const struct intel_c10pll_state * const *tables = 
mtl_c10_hdmi_tables; + int i; + + for (i = 0; tables[i]; i++) { + if (clock == tables[i]->clock) + return MODE_OK; + } + + return MODE_CLOCK_RANGE; +} + +static const struct intel_c10pll_state * const * +intel_c10pll_tables_get(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + if (intel_crtc_has_dp_encoder(crtc_state)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return mtl_c10_edp_tables; + else + return mtl_c10_dp_tables; + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + return mtl_c10_hdmi_tables; + } + + MISSING_CASE(encoder->type); + return NULL; +} + +static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state; + int i; + + if (intel_crtc_has_dp_encoder(crtc_state)) { + if (intel_panel_use_ssc(i915)) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + pll_state->ssc_enabled = + (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); + } + } + + if (pll_state->ssc_enabled) + return; + + drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); + for (i = 4; i < 9; i++) + pll_state->c10.pll[i] = 0; +} + +static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + const struct intel_c10pll_state * const *tables; + int i; + + tables = intel_c10pll_tables_get(crtc_state, encoder); + if (!tables) + return -EINVAL; + + for (i = 0; tables[i]; i++) { + if (crtc_state->port_clock == tables[i]->clock) { + crtc_state->cx0pll_state.c10 = *tables[i]; + intel_c10pll_update_pll(crtc_state, encoder); + + return 0; + } + } + + return -EINVAL; +} + +void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, + struct intel_c10pll_state *pll_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u8 lane = INTEL_CX0_LANE0; + intel_wakeref_t wakeref; + int i; + + wakeref = intel_cx0_phy_transaction_begin(encoder); + + /* + * According to C10 VDR Register programming Sequence we need + * to do this to read PHY internal registers from MsgBus. 
+ */ + intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1), + 0, C10_VDR_CTRL_MSGBUS_ACCESS, + MB_WRITE_COMMITTED); + + for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) + pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane, + PHY_C10_VDR_PLL(i)); + + pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0)); + pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0)); + + intel_cx0_phy_transaction_end(encoder, wakeref); +} + +static void intel_c10_pll_program(struct drm_i915_private *i915, + const struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10; + int i; + + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), + 0, C10_VDR_CTRL_MSGBUS_ACCESS, + MB_WRITE_COMMITTED); + + /* Custom width needs to be programmed to 0 for both the phy lanes */ + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH, + C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10, + MB_WRITE_COMMITTED); + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), + 0, C10_VDR_CTRL_UPDATE_CFG, + MB_WRITE_COMMITTED); + + /* Program the pll values only for the master lane */ + for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) + intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i), + pll_state->pll[i], + (i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED); + + intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED); + intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED); + + intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1), + 0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG, + MB_WRITE_COMMITTED); +} + +void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, + const struct intel_c10pll_state *hw_state) +{ + bool fracen; + int i; + unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; + unsigned int multiplier, tx_clk_div; + + fracen = hw_state->pll[0] & C10_PLL0_FRACEN; + drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ", + str_yes_no(fracen)); + + if (fracen) { + frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11]; + frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13]; + frac_den = hw_state->pll[10] << 8 | hw_state->pll[9]; + drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n", + frac_quot, frac_rem, frac_den); + } + + multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 | + hw_state->pll[2]) / 2 + 16; + tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]); + drm_dbg_kms(&i915->drm, + "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div); + + drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:"); + drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn); + + BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4); + for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4) + drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", + i, hw_state->pll[i], i + 1, hw_state->pll[i + 1], + i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]); +} + +static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_state *pll_state) +{ + u64 datarate; + u64 mpll_tx_clk_div; + u64 vco_freq_shift; + u64 vco_freq; + u64 multiplier; + u64 mpll_multiplier; + u64 mpll_fracn_quot; + u64 mpll_fracn_rem; + u8 
mpllb_ana_freq_vco; + u8 mpll_div_multiplier; + + if (pixel_clock < 25175 || pixel_clock > 600000) + return -EINVAL; + + datarate = ((u64)pixel_clock * 1000) * 10; + mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate)); + vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate)); + vco_freq = (datarate << vco_freq_shift) >> 8; + multiplier = div64_u64((vco_freq << 28), (REFCLK_38_4_MHZ >> 4)); + mpll_multiplier = 2 * (multiplier >> 32); + + mpll_fracn_quot = (multiplier >> 16) & 0xFFFF; + mpll_fracn_rem = multiplier & 0xFFFF; + + mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)), + datarate), 255); + + if (vco_freq <= DATARATE_3000000000) + mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3; + else if (vco_freq <= DATARATE_3500000000) + mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_2; + else if (vco_freq <= DATARATE_4000000000) + mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_1; + else + mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0; + + pll_state->link_bit_rate = pixel_clock; + pll_state->clock = pixel_clock; + pll_state->tx[0] = 0xbe88; + pll_state->tx[1] = 0x9800; + pll_state->tx[2] = 0x0000; + pll_state->cmn[0] = 0x0500; + pll_state->cmn[1] = 0x0005; + pll_state->cmn[2] = 0x0000; + pll_state->cmn[3] = 0x0000; + pll_state->mpllb[0] = (MPLL_TX_CLK_DIV(mpll_tx_clk_div) | + MPLL_MULTIPLIER(mpll_multiplier)); + pll_state->mpllb[1] = (CAL_DAC_CODE(CAL_DAC_CODE_31) | + WORD_CLK_DIV | + MPLL_DIV_MULTIPLIER(mpll_div_multiplier)); + pll_state->mpllb[2] = (MPLLB_ANA_FREQ_VCO(mpllb_ana_freq_vco) | + CP_PROP(CP_PROP_20) | + CP_INT(CP_INT_6)); + pll_state->mpllb[3] = (V2I(V2I_2) | + CP_PROP_GS(CP_PROP_GS_30) | + CP_INT_GS(CP_INT_GS_28)); + pll_state->mpllb[4] = 0x0000; + pll_state->mpllb[5] = 0x0000; + pll_state->mpllb[6] = (C20_MPLLB_FRACEN | SSC_UP_SPREAD); + pll_state->mpllb[7] = MPLL_FRACN_DEN; + pll_state->mpllb[8] = mpll_fracn_quot; + pll_state->mpllb[9] = mpll_fracn_rem; + pll_state->mpllb[10] = HDMI_DIV(HDMI_DIV_1); + + return 0; +} + +static int intel_c20_phy_check_hdmi_link_rate(int clock) +{ + const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables; + int i; + + for (i = 0; tables[i]; i++) { + if (clock == tables[i]->link_bit_rate) + return MODE_OK; + } + + if (clock >= 25175 && clock <= 594000) + return MODE_OK; + + return MODE_CLOCK_RANGE; +} + +int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock) +{ + struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi); + struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + if (intel_is_c10phy(i915, phy)) + return intel_c10_phy_check_hdmi_link_rate(clock); + return intel_c20_phy_check_hdmi_link_rate(clock); +} + +static const struct intel_c20pll_state * const * +intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + if (intel_crtc_has_dp_encoder(crtc_state)) + return mtl_c20_dp_tables; + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return mtl_c20_hdmi_tables; + + MISSING_CASE(encoder->type); + return NULL; +} + +static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + const struct intel_c20pll_state * const *tables; + int i; + + /* try computed C20 HDMI tables before using consolidated tables */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock, + &crtc_state->cx0pll_state.c20) == 0) + return 0; + } + + tables = 
intel_c20_pll_tables_get(crtc_state, encoder); + if (!tables) + return -EINVAL; + + for (i = 0; tables[i]; i++) { + if (crtc_state->port_clock == tables[i]->link_bit_rate) { + crtc_state->cx0pll_state.c20 = *tables[i]; + return 0; + } + } + + return -EINVAL; +} + +int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_is_c10phy(i915, phy)) + return intel_c10pll_calc_state(crtc_state, encoder); + return intel_c20pll_calc_state(crtc_state, encoder); +} + +static bool intel_c20_use_mplla(u32 clock) +{ + /* 10G and 20G rates use MPLLA */ + if (clock == 312500 || clock == 625000) + return true; + + return false; +} + +void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, + struct intel_c20pll_state *pll_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + bool cntx; + intel_wakeref_t wakeref; + int i; + + wakeref = intel_cx0_phy_transaction_begin(encoder); + + /* 1. Read current context selection */ + cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE; + + /* Read Tx configuration */ + for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { + if (cntx) + pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_B_TX_CNTX_CFG(i)); + else + pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_A_TX_CNTX_CFG(i)); + } + + /* Read common configuration */ + for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { + if (cntx) + pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_B_CMN_CNTX_CFG(i)); + else + pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_A_CMN_CNTX_CFG(i)); + } + + if (pll_state->tx[0] & C20_PHY_USE_MPLLB) { + /* MPLLB configuration */ + for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { + if (cntx) + pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_B_MPLLB_CNTX_CFG(i)); + else + pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_A_MPLLB_CNTX_CFG(i)); + } + } else { + /* MPLLA configuration */ + for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { + if (cntx) + pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_B_MPLLA_CNTX_CFG(i)); + else + pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_A_MPLLA_CNTX_CFG(i)); + } + } + + intel_cx0_phy_transaction_end(encoder, wakeref); +} + +void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, + const struct intel_c20pll_state *hw_state) +{ + int i; + + drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n"); + drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", + hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]); + drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", + hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]); + + if (intel_c20_use_mplla(hw_state->clock)) { + for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++) + drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]); + } else { + for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++) + drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]); + } +} + +static u8 intel_c20_get_dp_rate(u32 clock) +{ + switch (clock) { + case 
162000: /* 1.62 Gbps DP1.4 */ + return 0; + case 270000: /* 2.7 Gbps DP1.4 */ + return 1; + case 540000: /* 5.4 Gbps DP 1.4 */ + return 2; + case 810000: /* 8.1 Gbps DP1.4 */ + return 3; + case 216000: /* 2.16 Gbps eDP */ + return 4; + case 243000: /* 2.43 Gbps eDP */ + return 5; + case 324000: /* 3.24 Gbps eDP */ + return 6; + case 432000: /* 4.32 Gbps eDP */ + return 7; + case 312500: /* 10 Gbps DP2.0 */ + return 8; + case 421875: /* 13.5 Gbps DP2.0 */ + return 9; + case 625000: /* 20 Gbps DP2.0*/ + return 10; + case 648000: /* 6.48 Gbps eDP*/ + return 11; + case 675000: /* 6.75 Gbps eDP*/ + return 12; + default: + MISSING_CASE(clock); + return 0; + } +} + +static u8 intel_c20_get_hdmi_rate(u32 clock) +{ + if (clock >= 25175 && clock <= 600000) + return 0; + + switch (clock) { + case 166670: /* 3 Gbps */ + case 333330: /* 6 Gbps */ + case 666670: /* 12 Gbps */ + return 1; + case 444440: /* 8 Gbps */ + return 2; + case 555560: /* 10 Gbps */ + return 3; + default: + MISSING_CASE(clock); + return 0; + } +} + +static bool is_dp2(u32 clock) +{ + /* DP2.0 clock rates */ + if (clock == 312500 || clock == 421875 || clock == 625000) + return true; + + return false; +} + +static bool is_hdmi_frl(u32 clock) +{ + switch (clock) { + case 166670: /* 3 Gbps */ + case 333330: /* 6 Gbps */ + case 444440: /* 8 Gbps */ + case 555560: /* 10 Gbps */ + case 666670: /* 12 Gbps */ + return true; + default: + return false; + } +} + +static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder) +{ + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + + /* banks should not be cleared for DPALT/USB4/TBT modes */ + /* TODO: optimize re-calibration in legacy mode */ + return intel_tc_port_in_legacy_mode(intel_dig_port); +} + +static int intel_get_c20_custom_width(u32 clock, bool dp) +{ + if (dp && is_dp2(clock)) + return 2; + else if (is_hdmi_frl(clock)) + return 1; + else + return 0; +} + +static void intel_c20_pll_program(struct drm_i915_private *i915, + const struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20; + bool dp = false; + int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0; + bool cntx; + int i; + + if (intel_crtc_has_dp_encoder(crtc_state)) + dp = true; + + /* 1. Read current context selection */ + cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0); + + /* + * 2. If there is a protocol switch from HDMI to DP or vice versa, clear + * the lane #0 MPLLB CAL_DONE_BANK DP2.0 10G and 20G rates enable MPLLA. + * Protocol switch is only applicable for MPLLA + */ + if (intel_c20_protocol_switch_valid(encoder)) { + for (i = 0; i < 4; i++) + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0); + usleep_range(4000, 4100); + } + + /* 3. Write SRAM configuration context. 
If A in use, write configuration to B context */ + /* 3.1 Tx configuration */ + for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { + if (cntx) + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]); + else + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]); + } + + /* 3.2 common configuration */ + for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { + if (cntx) + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]); + else + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]); + } + + /* 3.3 mpllb or mplla configuration */ + if (intel_c20_use_mplla(pll_state->clock)) { + for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { + if (cntx) + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_A_MPLLA_CNTX_CFG(i), + pll_state->mplla[i]); + else + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_B_MPLLA_CNTX_CFG(i), + pll_state->mplla[i]); + } + } else { + for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { + if (cntx) + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_A_MPLLB_CNTX_CFG(i), + pll_state->mpllb[i]); + else + intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + PHY_C20_B_MPLLB_CNTX_CFG(i), + pll_state->mpllb[i]); + } + } + + /* 4. Program custom width to match the link protocol */ + intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH, + PHY_C20_CUSTOM_WIDTH_MASK, + PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(pll_state->clock, dp)), + MB_WRITE_COMMITTED); + + /* 5. For DP or 6. For HDMI */ + if (dp) { + intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, + BIT(6) | PHY_C20_CUSTOM_SERDES_MASK, + BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(pll_state->clock)), + MB_WRITE_COMMITTED); + } else { + intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, + BIT(7) | PHY_C20_CUSTOM_SERDES_MASK, + is_hdmi_frl(pll_state->clock) ? BIT(7) : 0, + MB_WRITE_COMMITTED); + + intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, + intel_c20_get_hdmi_rate(pll_state->clock), + MB_WRITE_COMMITTED); + } + + /* + * 7. Write Vendor specific registers to toggle context setting to load + * the updated programming toggle context bit + */ + intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, + BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED); +} + +int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_c10pll_state *pll_state) +{ + unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; + unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400; + int tmpclk = 0; + + if (pll_state->pll[0] & C10_PLL0_FRACEN) { + frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11]; + frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13]; + frac_den = pll_state->pll[10] << 8 | pll_state->pll[9]; + } + + multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 | + pll_state->pll[2]) / 2 + 16; + + tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]); + hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]); + + tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) + + DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den), + 10 << (tx_clk_div + 16)); + tmpclk *= (hdmi_div ? 
2 : 1); + + return tmpclk; +} + +int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_c20pll_state *pll_state) +{ + unsigned int frac, frac_en, frac_quot, frac_rem, frac_den; + unsigned int multiplier, refclk = 38400; + unsigned int tx_clk_div; + unsigned int ref_clk_mpllb_div; + unsigned int fb_clk_div4_en; + unsigned int ref, vco; + unsigned int tx_rate_mult; + unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]); + + if (pll_state->tx[0] & C20_PHY_USE_MPLLB) { + tx_rate_mult = 1; + frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]); + frac_quot = pll_state->mpllb[8]; + frac_rem = pll_state->mpllb[9]; + frac_den = pll_state->mpllb[7]; + multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]); + tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]); + ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]); + fb_clk_div4_en = 0; + } else { + tx_rate_mult = 2; + frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]); + frac_quot = pll_state->mplla[8]; + frac_rem = pll_state->mplla[9]; + frac_den = pll_state->mplla[7]; + multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]); + tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]); + ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]); + fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]); + } + + if (frac_en) + frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den); + else + frac = 0; + + ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div); + vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10); + + return vco << tx_rate_mult >> tx_clk_div >> tx_rate; +} + +static void intel_program_port_clock_ctl(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool lane_reversal) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u32 val = 0; + + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL, + lane_reversal ? XELPDP_PORT_REVERSAL : 0); + + if (lane_reversal) + val |= XELPDP_LANE1_PHY_CLOCK_SELECT; + + val |= XELPDP_FORWARD_CLOCK_UNGATE; + + if (is_hdmi_frl(crtc_state->port_clock)) + val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK); + else + val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK); + + /* TODO: HDMI FRL */ + /* DP2.0 10G and 20G rates enable MPLLA*/ + if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000) + val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0; + else + val |= crtc_state->cx0pll_state.ssc_enabled ? 
XELPDP_SSC_ENABLE_PLLB : 0; + + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | + XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLB, val); +} + +static u32 intel_cx0_get_powerdown_update(u8 lane_mask) +{ + u32 val = 0; + int lane = 0; + + for_each_cx0_lane_in_mask(lane_mask, lane) + val |= XELPDP_LANE_POWERDOWN_UPDATE(lane); + + return val; +} + +static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) +{ + u32 val = 0; + int lane = 0; + + for_each_cx0_lane_in_mask(lane_mask, lane) + val |= XELPDP_LANE_POWERDOWN_NEW_STATE(lane, state); + + return val; +} + +static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915, + enum port port, + u8 lane_mask, u8 state) +{ + enum phy phy = intel_port_to_phy(i915, port); + int lane; + + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), + intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK), + intel_cx0_get_powerdown_state(lane_mask, state)); + + /* Wait for pending transactions.*/ + for_each_cx0_lane_in_mask(lane_mask, lane) + if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), + XELPDP_PORT_M2P_TRANSACTION_PENDING, + XELPDP_MSGBUS_TIMEOUT_SLOW)) { + drm_dbg_kms(&i915->drm, + "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n", + phy_name(phy)); + intel_cx0_bus_reset(i915, port, lane); + } + + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), + intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES), + intel_cx0_get_powerdown_update(lane_mask)); + + /* Update Timeout Value */ + if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port), + intel_cx0_get_powerdown_update(lane_mask), 0, + XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) + drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); +} + +static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port) +{ + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), + XELPDP_POWER_STATE_READY_MASK, + XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY)); + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port), + XELPDP_POWER_STATE_ACTIVE_MASK | + XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, + XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) | + XELPDP_PLL_LANE_STAGGERING_DELAY(0)); +} + +static u32 intel_cx0_get_pclk_refclk_request(u8 lane_mask) +{ + u32 val = 0; + int lane = 0; + + for_each_cx0_lane_in_mask(lane_mask, lane) + val |= XELPDP_LANE_PCLK_REFCLK_REQUEST(lane); + + return val; +} + +static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask) +{ + u32 val = 0; + int lane = 0; + + for_each_cx0_lane_in_mask(lane_mask, lane) + val |= XELPDP_LANE_PCLK_REFCLK_ACK(lane); + + return val; +} + +static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, + struct intel_encoder *encoder, + bool lane_reversal) +{ + enum port port = encoder->port; + enum phy phy = intel_port_to_phy(i915, port); + bool both_lanes = intel_tc_port_fia_max_lane_count(enc_to_dig_port(encoder)) > 2; + u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : + INTEL_CX0_LANE0; + u32 lane_pipe_reset = both_lanes ? + XELPDP_LANE_PIPE_RESET(0) | + XELPDP_LANE_PIPE_RESET(1) : + XELPDP_LANE_PIPE_RESET(0); + u32 lane_phy_current_status = both_lanes ? 
+ XELPDP_LANE_PHY_CURRENT_STATUS(0) | + XELPDP_LANE_PHY_CURRENT_STATUS(1) : + XELPDP_LANE_PHY_CURRENT_STATUS(0); + + if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port), + XELPDP_PORT_BUF_SOC_PHY_READY, + XELPDP_PORT_BUF_SOC_PHY_READY, + XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) + drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n", + phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); + + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), + XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1), + lane_pipe_reset); + + if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port), + lane_phy_current_status, lane_phy_current_status, + XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) + drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); + + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port), + intel_cx0_get_pclk_refclk_request(both_lanes ? + INTEL_CX0_BOTH_LANES : + INTEL_CX0_LANE0), + intel_cx0_get_pclk_refclk_request(lane_mask)); + + if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port), + intel_cx0_get_pclk_refclk_ack(both_lanes ? + INTEL_CX0_BOTH_LANES : + INTEL_CX0_LANE0), + intel_cx0_get_pclk_refclk_ack(lane_mask), + XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) + drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n", + phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US); + + intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES, + CX0_P2_STATE_RESET); + intel_cx0_setup_powerdown(i915, port); + + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, 0); + + if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status, + XELPDP_PORT_RESET_END_TIMEOUT)) + drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n", + phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT); +} + +static void intel_cx0_program_phy_lane(struct drm_i915_private *i915, + struct intel_encoder *encoder, int lane_count, + bool lane_reversal) +{ + u8 l0t1, l0t2, l1t1, l1t2; + bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder)); + enum port port = encoder->port; + + if (intel_is_c10phy(i915, intel_port_to_phy(i915, port))) + intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, + PHY_C10_VDR_CONTROL(1), 0, + C10_VDR_CTRL_MSGBUS_ACCESS, + MB_WRITE_COMMITTED); + + /* TODO: DP-alt MFD case where only one PHY lane should be programmed. 
*/ + l0t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2)); + l0t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2)); + l1t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2)); + l1t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2)); + + l0t1 |= CONTROL2_DISABLE_SINGLE_TX; + l0t2 |= CONTROL2_DISABLE_SINGLE_TX; + l1t1 |= CONTROL2_DISABLE_SINGLE_TX; + l1t2 |= CONTROL2_DISABLE_SINGLE_TX; + + if (lane_reversal) { + switch (lane_count) { + case 4: + l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX; + fallthrough; + case 3: + l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX; + fallthrough; + case 2: + l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX; + fallthrough; + case 1: + l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX; + break; + default: + MISSING_CASE(lane_count); + } + } else { + switch (lane_count) { + case 4: + l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX; + fallthrough; + case 3: + l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX; + fallthrough; + case 2: + l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX; + l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX; + break; + case 1: + if (dp_alt_mode) + l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX; + else + l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX; + break; + default: + MISSING_CASE(lane_count); + } + } + + /* disable MLs */ + intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2), + l0t1, MB_WRITE_COMMITTED); + intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2), + l0t2, MB_WRITE_COMMITTED); + intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2), + l1t1, MB_WRITE_COMMITTED); + intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2), + l1t2, MB_WRITE_COMMITTED); + + if (intel_is_c10phy(i915, intel_port_to_phy(i915, port))) + intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, + PHY_C10_VDR_CONTROL(1), 0, + C10_VDR_CTRL_UPDATE_CFG, + MB_WRITE_COMMITTED); +} + +static u32 intel_cx0_get_pclk_pll_request(u8 lane_mask) +{ + u32 val = 0; + int lane = 0; + + for_each_cx0_lane_in_mask(lane_mask, lane) + val |= XELPDP_LANE_PCLK_PLL_REQUEST(lane); + + return val; +} + +static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask) +{ + u32 val = 0; + int lane = 0; + + for_each_cx0_lane_in_mask(lane_mask, lane) + val |= XELPDP_LANE_PCLK_PLL_ACK(lane); + + return val; +} + +static void intel_cx0pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 : + INTEL_CX0_LANE0; + intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); + + /* + * 1. Program PORT_CLOCK_CTL REGISTER to configure + * clock muxes, gating and SSC + */ + intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal); + + /* 2. Bring PHY out of reset. */ + intel_cx0_phy_lane_reset(i915, encoder, lane_reversal); + + /* + * 3. Change Phy power state to Ready. + * TODO: For DP alt mode use only one lane. + */ + intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES, + CX0_P2_STATE_READY); + + /* 4. Program PHY internal PLL internal registers. */ + if (intel_is_c10phy(i915, phy)) + intel_c10_pll_program(i915, crtc_state, encoder); + else + intel_c20_pll_program(i915, crtc_state, encoder); + + /* + * 5. 
Program the enabled and disabled owned PHY lane + * transmitters over message bus + */ + intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal); + + /* + * 6. Follow the Display Voltage Frequency Switching - Sequence + * Before Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* + * 7. Program DDI_CLK_VALFREQ to match intended DDI + * clock frequency. + */ + intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), + crtc_state->port_clock); + + /* + * 8. Set PORT_CLOCK_CTL register PCLK PLL Request + * LN<Lane for maxPCLK> to "1" to enable PLL. + */ + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES), + intel_cx0_get_pclk_pll_request(maxpclk_lane)); + + /* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */ + if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), + intel_cx0_get_pclk_pll_ack(maxpclk_lane), + XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) + drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n", + phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US); + + /* + * 10. Follow the Display Voltage Frequency Switching Sequence After + * Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* TODO: enable TBT-ALT mode */ + intel_cx0_phy_transaction_end(encoder, wakeref); +} + +int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u32 clock; + u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port)); + + clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); + + drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); + drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); + drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK)); + + switch (clock) { + case XELPDP_DDI_CLOCK_SELECT_TBT_162: + return 162000; + case XELPDP_DDI_CLOCK_SELECT_TBT_270: + return 270000; + case XELPDP_DDI_CLOCK_SELECT_TBT_540: + return 540000; + case XELPDP_DDI_CLOCK_SELECT_TBT_810: + return 810000; + default: + MISSING_CASE(clock); + return 162000; + } +} + +static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) +{ + switch (clock) { + case 162000: + return XELPDP_DDI_CLOCK_SELECT_TBT_162; + case 270000: + return XELPDP_DDI_CLOCK_SELECT_TBT_270; + case 540000: + return XELPDP_DDI_CLOCK_SELECT_TBT_540; + case 810000: + return XELPDP_DDI_CLOCK_SELECT_TBT_810; + default: + MISSING_CASE(clock); + return XELPDP_DDI_CLOCK_SELECT_TBT_162; + } +} + +static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + u32 val = 0; + + /* + * 1. Program PORT_CLOCK_CTL REGISTER to configure + * clock muxes, gating and SSC + */ + val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock)); + val |= XELPDP_FORWARD_CLOCK_UNGATE; + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val); + + /* 2. Read back PORT_CLOCK_CTL REGISTER */ + val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port)); + + /* + * 3. Follow the Display Voltage Frequency Switching - Sequence + * Before Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* + * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL. 
+ */ + val |= XELPDP_TBT_CLOCK_REQUEST; + intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val); + + /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */ + if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_TBT_CLOCK_ACK, + XELPDP_TBT_CLOCK_ACK, + 100, 0, NULL)) + drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", + encoder->base.base.id, encoder->base.name, phy_name(phy)); + + /* + * 6. Follow the Display Voltage Frequency Switching Sequence After + * Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* + * 7. Program DDI_CLK_VALFREQ to match intended DDI + * clock frequency. + */ + intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), + crtc_state->port_clock); +} + +void intel_mtl_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + intel_mtl_tbt_pll_enable(encoder, crtc_state); + else + intel_cx0pll_enable(encoder, crtc_state); +} + +static void intel_cx0pll_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + bool is_c10 = intel_is_c10phy(i915, phy); + intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); + + /* 1. Change owned PHY lane power to Disable state. */ + intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES, + is_c10 ? CX0_P2PG_STATE_DISABLE : + CX0_P4PG_STATE_DISABLE); + + /* + * 2. Follow the Display Voltage Frequency Switching Sequence Before + * Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* + * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK> + * to "0" to disable PLL. + */ + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) | + intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0); + + /* 4. Program DDI_CLK_VALFREQ to 0. */ + intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); + + /* + * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0". + */ + if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | + intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, + XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) + drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n", + phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US); + + /* + * 6. Follow the Display Voltage Frequency Switching Sequence After + * Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */ + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_DDI_CLOCK_SELECT_MASK, 0); + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_FORWARD_CLOCK_UNGATE, 0); + + intel_cx0_phy_transaction_end(encoder, wakeref); +} + +static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + /* + * 1. Follow the Display Voltage Frequency Switching Sequence Before + * Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* + * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL. 
+ */ + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_TBT_CLOCK_REQUEST, 0); + + /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */ + if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) + drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", + encoder->base.base.id, encoder->base.name, phy_name(phy)); + + /* + * 4. Follow the Display Voltage Frequency Switching Sequence After + * Frequency Change. We handle this step in bxt_set_cdclk(). + */ + + /* + * 5. Program PORT CLOCK CTRL register to disable and gate clocks + */ + intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), + XELPDP_DDI_CLOCK_SELECT_MASK | + XELPDP_FORWARD_CLOCK_UNGATE, 0); + + /* 6. Program DDI_CLK_VALFREQ to 0. */ + intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); +} + +void intel_mtl_pll_disable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + intel_mtl_tbt_pll_disable(encoder); + else + intel_cx0pll_disable(encoder); +} + +enum icl_port_dpll_id +intel_mtl_port_pll_type(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + /* + * TODO: Determine the PLL type from the SW state, once MTL PLL + * handling is done via the standard shared DPLL framework. + */ + u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port)); + u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); + + if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK || + clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK) + return ICL_PORT_DPLL_MG_PHY; + else + return ICL_PORT_DPLL_DEFAULT; +} + +void intel_c10pll_state_verify(struct intel_atomic_state *state, + struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_c10pll_state mpllb_hw_state = { 0 }; + struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10; + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_encoder *encoder; + enum phy phy; + int i; + + if (DISPLAY_VER(i915) < 14) + return; + + if (!new_crtc_state->hw.active) + return; + + /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */ + if (!intel_crtc_needs_modeset(new_crtc_state) && + !intel_crtc_needs_fastset(new_crtc_state)) + return; + + encoder = intel_get_crtc_new_encoder(state, new_crtc_state); + phy = intel_port_to_phy(i915, encoder->port); + + if (!intel_is_c10phy(i915, phy)) + return; + + intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state); + + for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { + u8 expected = mpllb_sw_state->pll[i]; + + I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, i, + expected, mpllb_hw_state.pll[i]); + } + + I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->tx, mpllb_hw_state.tx); + + I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->cmn, mpllb_hw_state.cmn); +} diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h 
b/drivers/gpu/drm/i915/display/intel_cx0_phy.h new file mode 100644 index 000000000000..f99809af257d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_CX0_PHY_H__ +#define __INTEL_CX0_PHY_H__ + +#include <linux/types.h> +#include <linux/bitfield.h> +#include <linux/bits.h> + +#include "i915_drv.h" +#include "intel_display_types.h" + +struct drm_i915_private; +struct intel_encoder; +struct intel_crtc_state; +enum icl_port_dpll_id; +enum phy; + +bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy); +void intel_mtl_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_mtl_pll_disable(struct intel_encoder *encoder); +enum icl_port_dpll_id +intel_mtl_port_pll_type(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state); +int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder); +void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv, + const struct intel_c10pll_state *hw_state); +int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_c10pll_state *pll_state); +void intel_c10pll_state_verify(struct intel_atomic_state *state, + struct intel_crtc_state *new_crtc_state); +void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, + struct intel_c20pll_state *pll_state); +void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, + const struct intel_c20pll_state *hw_state); +int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_c20pll_state *pll_state); +void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock); +void intel_cx0_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + u32 level); +int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder); +#endif /* __INTEL_CX0_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h new file mode 100644 index 000000000000..cb5d1be2ba19 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_CX0_PHY_REGS_H__ +#define __INTEL_CX0_PHY_REGS_H__ + +#include "i915_reg_defs.h" + +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A 0x64040 +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B 0x64140 +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1 0x16F240 +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2 0x16F440 +#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4) +#define XELPDP_PORT_M2P_TRANSACTION_PENDING REG_BIT(31) +#define XELPDP_PORT_M2P_COMMAND_TYPE_MASK REG_GENMASK(30, 27) +#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1) +#define XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2) +#define XELPDP_PORT_M2P_COMMAND_READ 
REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3) +#define XELPDP_PORT_M2P_DATA_MASK REG_GENMASK(23, 16) +#define XELPDP_PORT_M2P_DATA(val) REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val) +#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15) +#define XELPDP_PORT_M2P_ADDRESS_MASK REG_GENMASK(11, 0) +#define XELPDP_PORT_M2P_ADDRESS(val) REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val) +#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \ + _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4 + 8) +#define XELPDP_PORT_P2M_RESPONSE_READY REG_BIT(31) +#define XELPDP_PORT_P2M_COMMAND_TYPE_MASK REG_GENMASK(30, 27) +#define XELPDP_PORT_P2M_COMMAND_READ_ACK 0x4 +#define XELPDP_PORT_P2M_COMMAND_WRITE_ACK 0x5 +#define XELPDP_PORT_P2M_DATA_MASK REG_GENMASK(23, 16) +#define XELPDP_PORT_P2M_DATA(val) REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val) +#define XELPDP_PORT_P2M_ERROR_SET REG_BIT(15) + +#define XELPDP_MSGBUS_TIMEOUT_SLOW 1 +#define XELPDP_MSGBUS_TIMEOUT_FAST_US 2 +#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200 +#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20 +#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100 +#define XELPDP_PORT_RESET_START_TIMEOUT_US 5 +#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US 100 +#define XELPDP_PORT_RESET_END_TIMEOUT 15 +#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1 + +#define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004 +#define _XELPDP_PORT_BUF_CTL1_LN0_B 0x64104 +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1 0x16F200 +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2 0x16F400 +#define XELPDP_PORT_BUF_CTL1(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_A, \ + _XELPDP_PORT_BUF_CTL1_LN0_B, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC2)) +#define XELPDP_PORT_BUF_D2D_LINK_ENABLE REG_BIT(29) +#define XELPDP_PORT_BUF_D2D_LINK_STATE REG_BIT(28) +#define XELPDP_PORT_BUF_SOC_PHY_READY REG_BIT(24) +#define XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK REG_GENMASK(19, 18) +#define XELPDP_PORT_BUF_PORT_DATA_10BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 0) +#define XELPDP_PORT_BUF_PORT_DATA_20BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 1) +#define XELPDP_PORT_BUF_PORT_DATA_40BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 2) +#define XELPDP_PORT_REVERSAL REG_BIT(16) +#define XELPDP_PORT_BUF_IO_SELECT_TBT REG_BIT(11) +#define XELPDP_PORT_BUF_PHY_IDLE REG_BIT(7) +#define XELPDP_TC_PHY_OWNERSHIP REG_BIT(6) +#define XELPDP_TCSS_POWER_REQUEST REG_BIT(5) +#define XELPDP_TCSS_POWER_STATE REG_BIT(4) +#define XELPDP_PORT_WIDTH_MASK REG_GENMASK(3, 1) +#define XELPDP_PORT_WIDTH(val) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val) + +#define XELPDP_PORT_BUF_CTL2(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_A, \ + _XELPDP_PORT_BUF_CTL1_LN0_B, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 4) + +#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30)) +#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28)) +#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24)) +#define _XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK REG_GENMASK(23, 20) +#define _XELPDP_LANE0_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val) +#define _XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK REG_GENMASK(19, 16) +#define _XELPDP_LANE1_POWERDOWN_NEW_STATE(val) 
REG_FIELD_PREP(_XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK, val) +#define XELPDP_LANE_POWERDOWN_NEW_STATE(lane, val) _PICK(lane, \ + _XELPDP_LANE0_POWERDOWN_NEW_STATE(val), \ + _XELPDP_LANE1_POWERDOWN_NEW_STATE(val)) +#define XELPDP_LANE_POWERDOWN_NEW_STATE_MASK REG_GENMASK(3, 0) +#define XELPDP_POWER_STATE_READY_MASK REG_GENMASK(7, 4) +#define XELPDP_POWER_STATE_READY(val) REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val) + +#define XELPDP_PORT_BUF_CTL3(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_A, \ + _XELPDP_PORT_BUF_CTL1_LN0_B, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \ + _XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 8) +#define XELPDP_PLL_LANE_STAGGERING_DELAY_MASK REG_GENMASK(15, 8) +#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val) +#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0) +#define XELPDP_POWER_STATE_ACTIVE(val) REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val) +#define CX0_P0_STATE_ACTIVE 0x0 +#define CX0_P2_STATE_READY 0x2 +#define CX0_P2PG_STATE_DISABLE 0x9 +#define CX0_P4PG_STATE_DISABLE 0xC +#define CX0_P2_STATE_RESET 0x2 + +#define _XELPDP_PORT_CLOCK_CTL_A 0x640E0 +#define _XELPDP_PORT_CLOCK_CTL_B 0x641E0 +#define _XELPDP_PORT_CLOCK_CTL_USBC1 0x16F260 +#define _XELPDP_PORT_CLOCK_CTL_USBC2 0x16F460 +#define XELPDP_PORT_CLOCK_CTL(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \ + _XELPDP_PORT_CLOCK_CTL_A, \ + _XELPDP_PORT_CLOCK_CTL_B, \ + _XELPDP_PORT_CLOCK_CTL_USBC1, \ + _XELPDP_PORT_CLOCK_CTL_USBC2)) +#define XELPDP_LANE_PCLK_PLL_REQUEST(lane) REG_BIT(31 - ((lane) * 4)) +#define XELPDP_LANE_PCLK_PLL_ACK(lane) REG_BIT(30 - ((lane) * 4)) +#define XELPDP_LANE_PCLK_REFCLK_REQUEST(lane) REG_BIT(29 - ((lane) * 4)) +#define XELPDP_LANE_PCLK_REFCLK_ACK(lane) REG_BIT(28 - ((lane) * 4)) + +#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19) +#define XELPDP_TBT_CLOCK_ACK REG_BIT(18) +#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12) +#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val) +#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0 +#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8 +#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9 +#define XELPDP_DDI_CLOCK_SELECT_TBT_162 0xc +#define XELPDP_DDI_CLOCK_SELECT_TBT_270 0xd +#define XELPDP_DDI_CLOCK_SELECT_TBT_540 0xe +#define XELPDP_DDI_CLOCK_SELECT_TBT_810 0xf +#define XELPDP_FORWARD_CLOCK_UNGATE REG_BIT(10) +#define XELPDP_LANE1_PHY_CLOCK_SELECT REG_BIT(8) +#define XELPDP_SSC_ENABLE_PLLA REG_BIT(1) +#define XELPDP_SSC_ENABLE_PLLB REG_BIT(0) + +/* C10 Vendor Registers */ +#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx)) +#define C10_PLL0_FRACEN REG_BIT8(4) +#define C10_PLL3_MULTIPLIERH_MASK REG_GENMASK8(3, 0) +#define C10_PLL15_TXCLKDIV_MASK REG_GENMASK8(2, 0) +#define C10_PLL15_HDMIDIV_MASK REG_GENMASK8(5, 3) + +#define PHY_C10_VDR_CMN(idx) (0xC20 + (idx)) +#define C10_CMN0_REF_RANGE REG_FIELD_PREP(REG_GENMASK(4, 0), 1) +#define C10_CMN0_REF_CLK_MPLLB_DIV REG_FIELD_PREP(REG_GENMASK(7, 5), 1) +#define C10_CMN3_TXVBOOST_MASK REG_GENMASK8(7, 5) +#define C10_CMN3_TXVBOOST(val) REG_FIELD_PREP8(C10_CMN3_TXVBOOST_MASK, val) +#define PHY_C10_VDR_TX(idx) (0xC30 + (idx)) +#define C10_TX0_TX_MPLLB_SEL REG_BIT(4) +#define C10_TX1_TERMCTL_MASK REG_GENMASK8(7, 5) +#define C10_TX1_TERMCTL(val) REG_FIELD_PREP8(C10_TX1_TERMCTL_MASK, val) +#define PHY_C10_VDR_CONTROL(idx) (0xC70 + (idx) - 1) +#define C10_VDR_CTRL_MSGBUS_ACCESS REG_BIT8(2) +#define C10_VDR_CTRL_MASTER_LANE REG_BIT8(1) +#define C10_VDR_CTRL_UPDATE_CFG REG_BIT8(0) +#define 
PHY_C10_VDR_CUSTOM_WIDTH 0xD02 +#define C10_VDR_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0) +#define C10_VDR_CUSTOM_WIDTH_8_10 REG_FIELD_PREP(C10_VDR_CUSTOM_WIDTH_MASK, 0) +#define PHY_C10_VDR_OVRD 0xD71 +#define PHY_C10_VDR_OVRD_TX1 REG_BIT8(0) +#define PHY_C10_VDR_OVRD_TX2 REG_BIT8(2) +#define PHY_C10_VDR_PRE_OVRD_TX1 0xD80 +#define C10_PHY_OVRD_LEVEL_MASK REG_GENMASK8(5, 0) +#define C10_PHY_OVRD_LEVEL(val) REG_FIELD_PREP8(C10_PHY_OVRD_LEVEL_MASK, val) +#define PHY_CX0_VDROVRD_CTL(lane, tx, control) \ + (PHY_C10_VDR_PRE_OVRD_TX1 + \ + ((lane) ^ (tx)) * 0x10 + (control)) + +/* PIPE SPEC Defined Registers */ +#define PHY_CX0_TX_CONTROL(tx, control) (0x400 + ((tx) - 1) * 0x200 + (control)) +#define CONTROL2_DISABLE_SINGLE_TX REG_BIT(6) + +/* C20 Registers */ +#define PHY_C20_WR_ADDRESS_L 0xC02 +#define PHY_C20_WR_ADDRESS_H 0xC03 +#define PHY_C20_WR_DATA_L 0xC04 +#define PHY_C20_WR_DATA_H 0xC05 +#define PHY_C20_RD_ADDRESS_L 0xC06 +#define PHY_C20_RD_ADDRESS_H 0xC07 +#define PHY_C20_RD_DATA_L 0xC08 +#define PHY_C20_RD_DATA_H 0xC09 +#define PHY_C20_VDR_CUSTOM_SERDES_RATE 0xD00 +#define PHY_C20_VDR_HDMI_RATE 0xD01 +#define PHY_C20_CONTEXT_TOGGLE REG_BIT8(0) +#define PHY_C20_CUSTOM_SERDES_MASK REG_GENMASK8(4, 1) +#define PHY_C20_CUSTOM_SERDES(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_SERDES_MASK, val) +#define PHY_C20_VDR_CUSTOM_WIDTH 0xD02 +#define PHY_C20_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0) +#define PHY_C20_CUSTOM_WIDTH(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_WIDTH_MASK, val) +#define PHY_C20_A_TX_CNTX_CFG(idx) (0xCF2E - (idx)) +#define PHY_C20_B_TX_CNTX_CFG(idx) (0xCF2A - (idx)) +#define C20_PHY_TX_RATE REG_GENMASK(2, 0) +#define PHY_C20_A_CMN_CNTX_CFG(idx) (0xCDAA - (idx)) +#define PHY_C20_B_CMN_CNTX_CFG(idx) (0xCDA5 - (idx)) +#define PHY_C20_A_MPLLA_CNTX_CFG(idx) (0xCCF0 - (idx)) +#define PHY_C20_B_MPLLA_CNTX_CFG(idx) (0xCCE5 - (idx)) +#define C20_MPLLA_FRACEN REG_BIT(14) +#define C20_FB_CLK_DIV4_EN REG_BIT(13) +#define C20_MPLLA_TX_CLK_DIV_MASK REG_GENMASK(10, 8) +#define PHY_C20_A_MPLLB_CNTX_CFG(idx) (0xCB5A - (idx)) +#define PHY_C20_B_MPLLB_CNTX_CFG(idx) (0xCB4E - (idx)) +#define C20_MPLLB_TX_CLK_DIV_MASK REG_GENMASK(15, 13) +#define C20_MPLLB_FRACEN REG_BIT(13) +#define C20_REF_CLK_MPLLB_DIV_MASK REG_GENMASK(12, 10) +#define C20_MULTIPLIER_MASK REG_GENMASK(11, 0) +#define C20_PHY_USE_MPLLB REG_BIT(7) + +/* C20 Phy VSwing Masks */ +#define C20_PHY_VSWING_PREEMPH_MASK REG_GENMASK8(5, 0) +#define C20_PHY_VSWING_PREEMPH(val) REG_FIELD_PREP8(C20_PHY_VSWING_PREEMPH_MASK, val) + +#define RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(idx) (0x303D + (idx)) + +/* C20 HDMI computed pll definitions */ +#define REFCLK_38_4_MHZ 38400000 +#define CLOCK_4999MHZ 4999999999 +#define CLOCK_9999MHZ 9999999999 +#define DATARATE_3000000000 3000000000 +#define DATARATE_3500000000 3500000000 +#define DATARATE_4000000000 4000000000 +#define MPLL_FRACN_DEN 0xFFFF + +#define SSC_UP_SPREAD REG_BIT16(9) +#define WORD_CLK_DIV REG_BIT16(8) + +#define MPLL_TX_CLK_DIV(val) REG_FIELD_PREP16(C20_MPLLB_TX_CLK_DIV_MASK, val) +#define MPLL_MULTIPLIER(val) REG_FIELD_PREP16(C20_MULTIPLIER_MASK, val) + +#define MPLLB_ANA_FREQ_VCO_0 0 +#define MPLLB_ANA_FREQ_VCO_1 1 +#define MPLLB_ANA_FREQ_VCO_2 2 +#define MPLLB_ANA_FREQ_VCO_3 3 +#define MPLLB_ANA_FREQ_VCO_MASK REG_GENMASK16(15, 14) +#define MPLLB_ANA_FREQ_VCO(val) REG_FIELD_PREP16(MPLLB_ANA_FREQ_VCO_MASK, val) + +#define MPLL_DIV_MULTIPLIER_MASK REG_GENMASK16(7, 0) +#define MPLL_DIV_MULTIPLIER(val) REG_FIELD_PREP16(MPLL_DIV_MULTIPLIER_MASK, val) + +#define CAL_DAC_CODE_31 31 +#define CAL_DAC_CODE_MASK 
REG_GENMASK16(14, 10) +#define CAL_DAC_CODE(val) REG_FIELD_PREP16(CAL_DAC_CODE_MASK, val) + +#define CP_INT_GS_28 28 +#define CP_INT_GS_MASK REG_GENMASK16(6, 0) +#define CP_INT_GS(val) REG_FIELD_PREP16(CP_INT_GS_MASK, val) + +#define CP_PROP_GS_30 30 +#define CP_PROP_GS_MASK REG_GENMASK16(13, 7) +#define CP_PROP_GS(val) REG_FIELD_PREP16(CP_PROP_GS_MASK, val) + +#define CP_INT_6 6 +#define CP_INT_MASK REG_GENMASK16(6, 0) +#define CP_INT(val) REG_FIELD_PREP16(CP_INT_MASK, val) + +#define CP_PROP_20 20 +#define CP_PROP_MASK REG_GENMASK16(13, 7) +#define CP_PROP(val) REG_FIELD_PREP16(CP_PROP_MASK, val) + +#define V2I_2 2 +#define V2I_MASK REG_GENMASK16(15, 14) +#define V2I(val) REG_FIELD_PREP16(V2I_MASK, val) + +#define HDMI_DIV_1 1 +#define HDMI_DIV_MASK REG_GENMASK16(2, 0) +#define HDMI_DIV(val) REG_FIELD_PREP16(HDMI_DIV_MASK, val) + +#endif /* __INTEL_CX0_REG_DEFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 3a7b98837516..70d44edd8c6e 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -39,6 +39,8 @@ #include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" +#include "intel_cx0_phy.h" +#include "intel_cx0_phy_regs.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" @@ -61,6 +63,7 @@ #include "intel_hti.h" #include "intel_lspcon.h" #include "intel_mg_phy_regs.h" +#include "intel_modeset_lock.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_quirks.h" @@ -68,7 +71,6 @@ #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" -#include "intel_vrr.h" #include "skl_scaler.h" #include "skl_universal_plane.h" @@ -169,6 +171,18 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, trans->entries[level].hsw.trans2); } +static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port) +{ + int ret; + + /* FIXME: find out why Bspec's 100us timeout is too short */ + ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & + XELPDP_PORT_BUF_PHY_IDLE), 10000); + if (ret) + drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n", + port_name(port)); +} + void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port) { @@ -196,7 +210,9 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv, return; } - if (IS_DG2(dev_priv)) { + if (DISPLAY_VER(dev_priv) >= 14) { + timeout_us = 10000; + } else if (IS_DG2(dev_priv)) { timeout_us = 1200; } else if (DISPLAY_VER(dev_priv) >= 12) { if (intel_phy_is_tc(dev_priv, phy)) @@ -207,8 +223,12 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv, timeout_us = 500; } - ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & - DDI_BUF_IS_IDLE), timeout_us, 10, 10); + if (DISPLAY_VER(dev_priv) >= 14) + ret = _wait_for(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE), + timeout_us, 10, 10); + else + ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), + timeout_us, 10, 10); if (ret) drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n", @@ -313,6 +333,13 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, DDI_PORT_WIDTH(crtc_state->lane_count) | DDI_BUF_TRANS_SELECT(0); + if (DISPLAY_VER(i915) >= 14) { + if (intel_dp_is_uhbr(crtc_state)) + intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT; + else + intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT; + } + if 
(IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) { intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock); if (!intel_tc_port_in_tbt_alt_mode(dig_port)) @@ -515,6 +542,8 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, temp |= TRANS_DDI_HDMI_SCRAMBLING; if (crtc_state->hdmi_high_tmds_clock_ratio) temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; + if (DISPLAY_VER(dev_priv) >= 14) + temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B; temp |= (crtc_state->fdi_lanes - 1) << 1; @@ -2309,6 +2338,179 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) OVERLAP_PIXELS_MASK, dss1); } +static u8 mtl_get_port_width(u8 lane_count) +{ + switch (lane_count) { + case 1: + return 0; + case 2: + return 1; + case 3: + return 4; + case 4: + return 3; + default: + MISSING_CASE(lane_count); + return 4; + } +} + +static void +mtl_ddi_enable_d2d(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + + intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), 0, + XELPDP_PORT_BUF_D2D_LINK_ENABLE); + + if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & + XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) { + drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n", + port_name(port)); + } +} + +static void mtl_port_buf_ctl_program(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum port port = encoder->port; + u32 val; + + val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); + val &= ~XELPDP_PORT_WIDTH_MASK; + val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count)); + + val &= ~XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK; + if (intel_dp_is_uhbr(crtc_state)) + val |= XELPDP_PORT_BUF_PORT_DATA_40BIT; + else + val |= XELPDP_PORT_BUF_PORT_DATA_10BIT; + + if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL) + val |= XELPDP_PORT_REVERSAL; + + intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); +} + +static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + u32 val; + + val = intel_tc_port_in_tbt_alt_mode(dig_port) ? + XELPDP_PORT_BUF_IO_SELECT_TBT : 0; + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), + XELPDP_PORT_BUF_IO_SELECT_TBT, val); +} + +static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); + + intel_dp_set_link_params(intel_dp, + crtc_state->port_clock, + crtc_state->lane_count); + + /* + * We only configure what the register value will be here. Actual + * enabling happens during link training farther down. + */ + intel_ddi_init_dp_buf_reg(encoder, crtc_state); + + /* + * 1. Enable Power Wells + * + * This was handled at the beginning of intel_atomic_commit_tail(), + * before we called down into this function. + */ + + /* 2. PMdemand was already set */ + + /* 3. Select Thunderbolt */ + mtl_port_buf_ctl_io_selection(encoder); + + /* 4. 
Enable Panel Power if PPS is required */ + intel_pps_on(intel_dp); + + /* 5. Enable the port PLL */ + intel_ddi_enable_clock(encoder, crtc_state); + + /* + * 6.a Configure Transcoder Clock Select to direct the Port clock to the + * Transcoder. + */ + intel_ddi_enable_transcoder_clock(encoder, crtc_state); + + /* + * 6.b If DP v2.0/128b mode - Configure TRANS_DP2_CTL register settings. + */ + intel_ddi_config_transcoder_dp2(encoder, crtc_state); + + /* + * 6.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST + * Transport Select + */ + intel_ddi_config_transcoder_func(encoder, crtc_state); + + /* + * 6.e Program CoG/MSO configuration bits in DSS_CTL1 if selected. + */ + intel_ddi_mso_configure(crtc_state); + + if (!is_mst) + intel_dp_set_power(intel_dp, DP_SET_POWER_D0); + + intel_dp_configure_protocol_converter(intel_dp, crtc_state); + intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); + /* + * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit + * in the FEC_CONFIGURATION register to 1 before initiating link + * training + */ + intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + + intel_dp_check_frl_training(intel_dp); + intel_dp_pcon_dsc_configure(intel_dp, crtc_state); + + /* + * 6. The rest of the below are substeps under the bspec's "Enable and + * Train Display Port" step. Note that steps that are specific to + * MST will be handled by intel_mst_pre_enable_dp() before/after it + * calls into this function. Also intel_mst_pre_enable_dp() only calls + * us when active_mst_links==0, so any steps designated for "single + * stream or multi-stream master transcoder" can just be performed + * unconditionally here. + * + * mtl_ddi_prepare_link_retrain() that is called by + * intel_dp_start_link_train() will execute steps: 6.d, 6.f, 6.g, 6.h, + * 6.i and 6.j + * + * 6.k Follow DisplayPort specification training sequence (see notes for + * failure handling) + * 6.m If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle + * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent) + * (timeout after 800 us) + */ + intel_dp_start_link_train(intel_dp, crtc_state); + + /* 6.n Set DP_TP_CTL link training to Normal */ + if (!is_trans_port_sync_mode(crtc_state)) + intel_dp_stop_link_train(intel_dp, crtc_state); + + /* 6.o Configure and enable FEC if needed */ + intel_ddi_enable_fec(encoder, crtc_state); + + intel_dsc_dp_pps_write(encoder, crtc_state); +} + static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -2523,7 +2725,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder), crtc_state); - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 14) + mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); + else if (DISPLAY_VER(dev_priv) >= 12) tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); else hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); @@ -2597,15 +2801,57 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, /* FIXME precompute everything properly */ /* FIXME how do we turn infoframes off again? 
*/ - if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink) + if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp)) dig_port->set_infoframes(encoder, crtc_state->has_infoframe, crtc_state, conn_state); } } -static void intel_disable_ddi_buf(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static void +mtl_ddi_disable_d2d_link(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + + intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), + XELPDP_PORT_BUF_D2D_LINK_ENABLE, 0); + + if (wait_for_us(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & + XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) + drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for PORT_BUF_CTL %c\n", + port_name(port)); +} + +static void mtl_disable_ddi_buf(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; + + /* 3.b Clear DDI_CTL_DE Enable to 0. */ + val = intel_de_read(dev_priv, DDI_BUF_CTL(port)); + if (val & DDI_BUF_CTL_ENABLE) { + val &= ~DDI_BUF_CTL_ENABLE; + intel_de_write(dev_priv, DDI_BUF_CTL(port), val); + + /* 3.c Poll for PORT_BUF_CTL Idle Status == 1, timeout after 100us */ + mtl_wait_ddi_buf_idle(dev_priv, port); + } + + /* 3.d Disable D2D Link */ + mtl_ddi_disable_d2d_link(encoder); + + /* 3.e Disable DP_TP_CTL */ + if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_ENABLE, 0); + } +} + +static void disable_ddi_buf(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; @@ -2630,6 +2876,21 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, intel_wait_ddi_buf_idle(dev_priv, port); } +static void intel_disable_ddi_buf(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (DISPLAY_VER(dev_priv) >= 14) { + mtl_disable_ddi_buf(encoder, crtc_state); + + /* 3.f Disable DP_TP_CTL FEC Enable if it is needed */ + intel_ddi_disable_fec_state(encoder, crtc_state); + } else { + disable_ddi_buf(encoder, crtc_state); + } +} + static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -2638,6 +2899,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &dig_port->dp; + intel_wakeref_t wakeref; bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); @@ -2677,12 +2939,19 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, intel_pps_vdd_on(intel_dp); intel_pps_off(intel_dp); - if (!intel_tc_port_in_tbt_alt_mode(dig_port)) + wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref); + + if (wakeref) intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, - fetch_and_zero(&dig_port->ddi_io_wakeref)); + wakeref); intel_ddi_disable_clock(encoder); + + /* De-select Thunderbolt */ + if (DISPLAY_VER(dev_priv) >= 14) + intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(encoder->port), + XELPDP_PORT_BUF_IO_SELECT_TBT, 0); } static void intel_ddi_post_disable_hdmi(struct 
intel_atomic_state *state, @@ -2693,6 +2962,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; + intel_wakeref_t wakeref; dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); @@ -2705,9 +2975,11 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) >= 12) intel_ddi_disable_transcoder_clock(old_crtc_state); - intel_display_power_put(dev_priv, - dig_port->ddi_io_power_domain, - fetch_and_zero(&dig_port->ddi_io_wakeref)); + wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref); + if (wakeref) + intel_display_power_put(dev_priv, + dig_port->ddi_io_power_domain, + wakeref); intel_ddi_disable_clock(encoder); @@ -2725,8 +2997,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { intel_crtc_vblank_off(old_crtc_state); - intel_vrr_disable(old_crtc_state); - intel_disable_transcoder(old_crtc_state); intel_ddi_disable_transcoder_func(old_crtc_state); @@ -2840,7 +3110,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, drm_connector_update_privacy_screen(conn_state); intel_edp_backlight_on(crtc_state, conn_state); - if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink) + if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp)) intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); intel_audio_codec_enable(encoder, crtc_state, conn_state); @@ -2890,6 +3160,10 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, if (has_buf_trans_select(dev_priv)) hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state); + /* e. Enable D2D Link for C10/C20 Phy */ + if (DISPLAY_VER(dev_priv) >= 14) + mtl_ddi_enable_d2d(encoder); + encoder->set_signal_levels(encoder, crtc_state); /* Display WA #1143: skl,kbl,cfl */ @@ -2935,12 +3209,30 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, * * On ADL_P the PHY link rate and lane count must be programmed but * these are both 0 for HDMI. + * + * But MTL onwards HDMI2.1 is supported and in TMDS mode this + * is filled with lane count, already set in the crtc_state. + * The same is required to be filled in PORT_BUF_CTL for C10/20 Phy. 
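+ * (Illustrative note: mtl_get_port_width() above encodes lane counts as + * 1 lane -> 0, 2 -> 1, 3 -> 4 and 4 -> 3; that encoded value is written + * both to XELPDP_PORT_WIDTH in PORT_BUF_CTL1 here and to DDI_PORT_WIDTH + * in buf_ctl below.)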
*/ buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE; - if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) { + if (DISPLAY_VER(dev_priv) >= 14) { + u8 lane_count = mtl_get_port_width(crtc_state->lane_count); + u32 port_buf = 0; + + port_buf |= XELPDP_PORT_WIDTH(lane_count); + + if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL) + port_buf |= XELPDP_PORT_REVERSAL; + + intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), + XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf); + + buf_ctl |= DDI_PORT_WIDTH(lane_count); + } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) { drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; } + intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl); intel_wait_ddi_buf_active(dev_priv, port); @@ -2963,8 +3255,6 @@ static void intel_enable_ddi(struct intel_atomic_state *state, intel_enable_transcoder(crtc_state); - intel_vrr_enable(encoder, crtc_state); - intel_crtc_vblank_on(crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) @@ -2975,9 +3265,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state, /* Enable hdcp if it's desired */ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) - intel_hdcp_enable(to_intel_connector(conn_state->connector), - crtc_state, - (u8)conn_state->hdcp_content_type); + intel_hdcp_enable(state, encoder, crtc_state, conn_state); } static void intel_disable_ddi_dp(struct intel_atomic_state *state, @@ -3023,6 +3311,8 @@ static void intel_disable_ddi(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + intel_tc_port_link_cancel_reset_work(enc_to_dig_port(encoder)); + intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) @@ -3070,7 +3360,8 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *slave_crtc; enum phy phy = intel_port_to_phy(i915, encoder->port); - if (!intel_phy_is_tc(i915, phy)) + /* FIXME: Add MTL pll_mgr */ + if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy)) return; intel_update_active_dpll(state, crtc, encoder); @@ -3121,6 +3412,53 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder) intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, ln), DKL_PCS_DW5_CORE_SOFTRESET, 0); } +static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 dp_tp_ctl; + + /* + * TODO: To train with only a different voltage swing entry is not + * necessary disable and enable port + */ + dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); + if (dp_tp_ctl & DP_TP_CTL_ENABLE) + mtl_disable_ddi_buf(encoder, crtc_state); + + /* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */ + dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { + dp_tp_ctl |= DP_TP_CTL_MODE_MST; + } else { + dp_tp_ctl |= DP_TP_CTL_MODE_SST; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + } + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); + 
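/* the posting read below flushes the DP_TP_CTL write to the hardware before the D2D link is enabled in the next step */ +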
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); + + /* 6.f Enable D2D Link */ + mtl_ddi_enable_d2d(encoder); + + /* 6.g Configure voltage swing and related IO settings */ + encoder->set_signal_levels(encoder, crtc_state); + + /* 6.h Configure PORT_BUF_CTL1 */ + mtl_port_buf_ctl_program(encoder, crtc_state); + + /* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */ + intel_dp->DP |= DDI_BUF_CTL_ENABLE; + intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); + intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); + + /* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */ + intel_wait_ddi_buf_active(dev_priv, port); +} + static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -3369,7 +3707,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, fallthrough; case TRANS_DDI_MODE_SELECT_DVI: pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); - pipe_config->lane_count = 4; + if (DISPLAY_VER(dev_priv) >= 14) + pipe_config->lane_count = + ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; + else + pipe_config->lane_count = 4; break; case TRANS_DDI_MODE_SELECT_DP_SST: if (encoder->type == INTEL_OUTPUT_EDP) @@ -3396,7 +3738,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->fec_enable); } - if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink) + if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp)) pipe_config->infoframes.enable |= intel_lspcon_infoframes_enabled(encoder, pipe_config); else @@ -3506,6 +3848,28 @@ void intel_ddi_get_clock(struct intel_encoder *encoder, &crtc_state->dpll_hw_state); } +static void mtl_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) { + crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder); + } else if (intel_is_c10phy(i915, phy)) { + intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10); + intel_c10pll_dump_hw_state(i915, &crtc_state->cx0pll_state.c10); + crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); + } else { + intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20); + intel_c20pll_dump_hw_state(i915, &crtc_state->cx0pll_state.c20); + crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); + } + + intel_ddi_get_config(encoder, crtc_state); +} + static void dg2_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { @@ -3700,6 +4064,9 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) { + pipe_config->has_hdmi_sink = + intel_hdmi_compute_has_hdmi_sink(encoder, pipe_config, conn_state); + ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); } else { ret = intel_dp_compute_config(encoder, pipe_config, conn_state); @@ -3865,14 +4232,25 @@ static void intel_ddi_encoder_reset(struct drm_encoder *encoder) intel_tc_port_init_mode(dig_port); } +static int intel_ddi_encoder_late_register(struct drm_encoder *_encoder) +{ + struct intel_encoder *encoder = to_intel_encoder(_encoder); + + 
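/* the drm core invokes .late_register once the encoder has been registered; kick off a TC port link reset at that point */ +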
intel_tc_port_link_reset(enc_to_dig_port(encoder)); + + return 0; +} + static const struct drm_encoder_funcs intel_ddi_funcs = { .reset = intel_ddi_encoder_reset, .destroy = intel_ddi_encoder_destroy, + .late_register = intel_ddi_encoder_late_register, }; static struct intel_connector * intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_connector *connector; enum port port = dig_port->base.port; @@ -3881,7 +4259,10 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) return NULL; dig_port->dp.output_reg = DDI_BUF_CTL(port); - dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; + if (DISPLAY_VER(i915) >= 14) + dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain; + else + dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; dig_port->dp.set_link_train = intel_ddi_set_link_train; dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train; @@ -3921,6 +4302,7 @@ static int modeset_pipe(struct drm_crtc *crtc, return -ENOMEM; state->acquire_ctx = ctx; + to_intel_atomic_state(state)->internal = true; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { @@ -4032,27 +4414,17 @@ intel_ddi_hotplug(struct intel_encoder *encoder, state = intel_encoder_hotplug(encoder, connector); - drm_modeset_acquire_init(&ctx, 0); - - for (;;) { - if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) - ret = intel_hdmi_reset_link(encoder, &ctx); - else - ret = intel_dp_retrain_link(encoder, &ctx); - - if (ret == -EDEADLK) { - drm_modeset_backoff(&ctx); - continue; + if (!intel_tc_port_link_reset(dig_port)) { + intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret) { + if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) + ret = intel_hdmi_reset_link(encoder, &ctx); + else + ret = intel_dp_retrain_link(encoder, &ctx); } - break; + drm_WARN_ON(encoder->base.dev, ret); } - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - drm_WARN(encoder->base.dev, ret, - "Acquiring modeset locks failed with %i\n", ret); - /* * Unpowered type-c dongles can take some time to boot and be * responsible, so here giving some time to those dongles to power up @@ -4257,31 +4629,27 @@ static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port) static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum phy phy = intel_port_to_phy(i915, encoder->port); - intel_dp_encoder_suspend(encoder); - - if (!intel_phy_is_tc(i915, phy)) - return; - - intel_tc_port_flush_work(dig_port); } -static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) +static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum phy phy = intel_port_to_phy(i915, encoder->port); + intel_tc_port_suspend(dig_port); +} + +static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) +{ intel_dp_encoder_shutdown(encoder); intel_hdmi_encoder_shutdown(encoder); +} - if (!intel_phy_is_tc(i915, phy)) - return; +static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); intel_tc_port_cleanup(dig_port); } @@ -4413,7 +4781,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->cloneable = 0; encoder->pipe_mask = ~0; - if (IS_DG2(dev_priv)) { + if (DISPLAY_VER(dev_priv) >= 14) { + encoder->enable_clock = intel_mtl_pll_enable; + encoder->disable_clock = intel_mtl_pll_disable; + encoder->port_pll_type = intel_mtl_port_pll_type; + encoder->get_config = mtl_ddi_get_config; + } else if (IS_DG2(dev_priv)) { encoder->enable_clock = intel_mpllb_enable; encoder->disable_clock = intel_mpllb_disable; encoder->get_config = dg2_ddi_get_config; @@ -4473,7 +4846,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->get_config = hsw_ddi_get_config; } - if (IS_DG2(dev_priv)) { + if (DISPLAY_VER(dev_priv) >= 14) { + encoder->set_signal_levels = intel_cx0_phy_set_signal_levels; + } else if (IS_DG2(dev_priv)) { encoder->set_signal_levels = intel_snps_phy_set_signal_levels; } else if (DISPLAY_VER(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) @@ -4541,6 +4916,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) is_legacy ? "legacy" : "non-legacy"); } + encoder->suspend_complete = intel_ddi_tc_encoder_suspend_complete; + encoder->shutdown_complete = intel_ddi_tc_encoder_shutdown_complete; + if (intel_tc_port_init(dig_port, is_legacy) < 0) goto err; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 006a2e979000..b7d20485bde5 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -9,6 +9,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" +#include "intel_cx0_phy.h" /* HDMI/DVI modes ignore everything but the last 2 items. 
So we share * them for both DP and FDI transports, allowing those ports to @@ -1035,6 +1036,65 @@ static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = { .num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr), }; +static const union intel_ddi_buf_trans_entry _mtl_c10_trans_dp14[] = { + { .snps = { 26, 0, 0 } }, /* preset 0 */ + { .snps = { 33, 0, 6 } }, /* preset 1 */ + { .snps = { 38, 0, 11 } }, /* preset 2 */ + { .snps = { 43, 0, 19 } }, /* preset 3 */ + { .snps = { 39, 0, 0 } }, /* preset 4 */ + { .snps = { 45, 0, 7 } }, /* preset 5 */ + { .snps = { 46, 0, 13 } }, /* preset 6 */ + { .snps = { 46, 0, 0 } }, /* preset 7 */ + { .snps = { 55, 0, 7 } }, /* preset 8 */ + { .snps = { 62, 0, 0 } }, /* preset 9 */ +}; + +static const struct intel_ddi_buf_trans mtl_cx0_trans = { + .entries = _mtl_c10_trans_dp14, + .num_entries = ARRAY_SIZE(_mtl_c10_trans_dp14), + .hdmi_default_entry = ARRAY_SIZE(_mtl_c10_trans_dp14) - 1, +}; + +/* DP2.0 */ +static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = { + { .snps = { 48, 0, 0 } }, /* preset 0 */ + { .snps = { 43, 0, 5 } }, /* preset 1 */ + { .snps = { 40, 0, 8 } }, /* preset 2 */ + { .snps = { 37, 0, 11 } }, /* preset 3 */ + { .snps = { 33, 0, 15 } }, /* preset 4 */ + { .snps = { 46, 2, 0 } }, /* preset 5 */ + { .snps = { 42, 2, 4 } }, /* preset 6 */ + { .snps = { 38, 2, 8 } }, /* preset 7 */ + { .snps = { 35, 2, 11 } }, /* preset 8 */ + { .snps = { 33, 2, 13 } }, /* preset 9 */ + { .snps = { 44, 4, 0 } }, /* preset 10 */ + { .snps = { 40, 4, 4 } }, /* preset 11 */ + { .snps = { 37, 4, 7 } }, /* preset 12 */ + { .snps = { 33, 4, 11 } }, /* preset 13 */ + { .snps = { 40, 8, 0 } }, /* preset 14 */ + { .snps = { 28, 2, 2 } }, /* preset 15 */ +}; + +/* HDMI2.0 */ +static const union intel_ddi_buf_trans_entry _mtl_c20_trans_hdmi[] = { + { .snps = { 48, 0, 0 } }, /* preset 0 */ + { .snps = { 38, 4, 6 } }, /* preset 1 */ + { .snps = { 36, 4, 8 } }, /* preset 2 */ + { .snps = { 34, 4, 10 } }, /* preset 3 */ + { .snps = { 32, 4, 12 } }, /* preset 4 */ +}; + +static const struct intel_ddi_buf_trans mtl_c20_trans_hdmi = { + .entries = _mtl_c20_trans_hdmi, + .num_entries = ARRAY_SIZE(_mtl_c20_trans_hdmi), + .hdmi_default_entry = 0, +}; + +static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = { + .entries = _mtl_c20_trans_uhbr, + .num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr), +}; + bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) { return table == &tgl_combo_phy_trans_edp_hbr2_hobl; @@ -1606,12 +1666,30 @@ dg2_get_snps_buf_trans(struct intel_encoder *encoder, return intel_get_buf_trans(&dg2_snps_trans, n_entries); } +static const struct intel_ddi_buf_trans * +mtl_get_cx0_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000) + return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy))) + return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries); + else + return intel_get_buf_trans(&mtl_cx0_trans, n_entries); +} + void intel_ddi_buf_trans_init(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); - if (IS_DG2(i915)) { + if (DISPLAY_VER(i915) >= 14) { + encoder->get_buf_trans = 
mtl_get_cx0_buf_trans; + } else if (IS_DG2(i915)) { encoder->get_buf_trans = dg2_get_snps_buf_trans; } else if (IS_ALDERLAKE_P(i915)) { if (intel_phy_is_combo(i915, phy)) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3c29792137a5..f51a55f4e9d0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -31,8 +31,6 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/string_helpers.h> -#include <linux/vga_switcheroo.h> -#include <acpi/video.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> @@ -41,7 +39,6 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> -#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> @@ -57,7 +54,6 @@ #include "i9xx_plane.h" #include "i9xx_wm.h" #include "icl_dsi.h" -#include "intel_acpi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_audio.h" @@ -70,7 +66,7 @@ #include "intel_crtc_state_dump.h" #include "intel_ddi.h" #include "intel_de.h" -#include "intel_display_debugfs.h" +#include "intel_display_driver.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dmc.h" @@ -90,11 +86,8 @@ #include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_frontbuffer.h" -#include "intel_gmbus.h" -#include "intel_hdcp.h" #include "intel_hdmi.h" #include "intel_hotplug.h" -#include "intel_hti.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_modeset_setup.h" @@ -108,7 +101,6 @@ #include "intel_plane_initial.h" #include "intel_pps.h" #include "intel_psr.h" -#include "intel_quirks.h" #include "intel_sdvo.h" #include "intel_snps_phy.h" #include "intel_tc.h" @@ -131,7 +123,6 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); -static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) @@ -178,7 +169,7 @@ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, return hpll; } -static void intel_update_czclk(struct drm_i915_private *dev_priv) +void intel_update_czclk(struct drm_i915_private *dev_priv) { if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) return; @@ -234,7 +225,7 @@ is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) return crtc_state->master_transcoder != INVALID_TRANSCODER; } -static bool +bool is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) { return crtc_state->sync_mode_slaves_mask != 0; @@ -331,20 +322,21 @@ void assert_transcoder(struct drm_i915_private *dev_priv, cur_state = false; } - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(dev_priv, cur_state != state, "transcoder %s assertion failure (expected %s, current %s)\n", - transcoder_name(cpu_transcoder), - str_on_off(state), str_on_off(cur_state)); + transcoder_name(cpu_transcoder), str_on_off(state), + str_on_off(cur_state)); } static void assert_plane(struct intel_plane *plane, bool state) { + struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe; bool cur_state; cur_state = plane->get_hw_state(plane, &pipe); - I915_STATE_WARN(cur_state != 
state, + I915_STATE_WARN(i915, cur_state != state, "%s assertion failure (expected %s, current %s)\n", plane->base.name, str_on_off(state), str_on_off(cur_state)); @@ -701,175 +693,6 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) return y; } -static int -intel_display_commit_duplicated_state(struct intel_atomic_state *state, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - int ret; - - ret = drm_atomic_helper_commit_duplicated_state(&state->base, ctx); - - drm_WARN_ON(&i915->drm, ret == -EDEADLK); - - return ret; -} - -static int -__intel_display_resume(struct drm_i915_private *i915, - struct drm_atomic_state *state, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc; - int i; - - intel_modeset_setup_hw_state(i915, ctx); - intel_vga_redisable(i915); - - if (!state) - return 0; - - /* - * We've duplicated the state, pointers to the old state are invalid. - * - * Don't attempt to use the old state until we commit the duplicated state. - */ - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { - /* - * Force recalculation even if we restore - * current state. With fast modeset this may not result - * in a modeset when the state is compatible. - */ - crtc_state->mode_changed = true; - } - - /* ignore any reset values/BIOS leftovers in the WM registers */ - if (!HAS_GMCH(i915)) - to_intel_atomic_state(state)->skip_intermediate_wm = true; - - return intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx); -} - -static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) -{ - return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && - intel_has_gpu_reset(to_gt(dev_priv))); -} - -void intel_display_prepare_reset(struct drm_i915_private *dev_priv) -{ - struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx; - struct drm_atomic_state *state; - int ret; - - if (!HAS_DISPLAY(dev_priv)) - return; - - /* reset doesn't touch the display */ - if (!dev_priv->params.force_reset_modeset_test && - !gpu_reset_clobbers_display(dev_priv)) - return; - - /* We have a modeset vs reset deadlock, defensively unbreak it. */ - set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags); - smp_mb__after_atomic(); - wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET); - - if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { - drm_dbg_kms(&dev_priv->drm, - "Modeset potentially stuck, unbreaking through wedging\n"); - intel_gt_set_wedged(to_gt(dev_priv)); - } - - /* - * Need mode_config.mutex so that we don't - * trample ongoing ->detect() and whatnot. - */ - mutex_lock(&dev_priv->drm.mode_config.mutex); - drm_modeset_acquire_init(ctx, 0); - while (1) { - ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx); - if (ret != -EDEADLK) - break; - - drm_modeset_backoff(ctx); - } - /* - * Disabling the crtcs gracefully seems nicer. Also the - * g33 docs say we should at least disable all the planes. 
- */ - state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx); - if (IS_ERR(state)) { - ret = PTR_ERR(state); - drm_err(&dev_priv->drm, "Duplicating state failed with %i\n", - ret); - return; - } - - ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx); - if (ret) { - drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", - ret); - drm_atomic_state_put(state); - return; - } - - dev_priv->display.restore.modeset_state = state; - state->acquire_ctx = ctx; -} - -void intel_display_finish_reset(struct drm_i915_private *i915) -{ - struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx; - struct drm_atomic_state *state; - int ret; - - if (!HAS_DISPLAY(i915)) - return; - - /* reset doesn't touch the display */ - if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags)) - return; - - state = fetch_and_zero(&i915->display.restore.modeset_state); - if (!state) - goto unlock; - - /* reset doesn't touch the display */ - if (!gpu_reset_clobbers_display(i915)) { - /* for testing only restore the display */ - ret = intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx); - if (ret) - drm_err(&i915->drm, - "Restoring old state failed with %i\n", ret); - } else { - /* - * The display has been reset as well, - * so need a full re-initialization. - */ - intel_pps_unlock_regs_wa(i915); - intel_modeset_init_hw(i915); - intel_clock_gating_init(i915); - intel_hpd_init(i915); - - ret = __intel_display_resume(i915, state, ctx); - if (ret) - drm_err(&i915->drm, - "Restoring old state failed with %i\n", ret); - - intel_hpd_poll_disable(i915); - } - - drm_atomic_state_put(state); -unlock: - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); - mutex_unlock(&i915->drm.mode_config.mutex); - - clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags); -} - static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -990,8 +813,10 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) else intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); - intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y); - intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height); + intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), + PF_WIN_XPOS(x) | PF_WIN_YPOS(y)); + intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), + PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height)); } static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) @@ -1069,20 +894,40 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); } +#define is_enabling(feature, old_crtc_state, new_crtc_state) \ + ((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \ + (new_crtc_state)->feature) +#define is_disabling(feature, old_crtc_state, new_crtc_state) \ + ((old_crtc_state)->feature && \ + (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state))) + static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { - return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) && - new_crtc_state->active_planes; + return is_enabling(active_planes, old_crtc_state, new_crtc_state); } static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { - return old_crtc_state->active_planes && - 
(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)); + return is_disabling(active_planes, old_crtc_state, new_crtc_state); +} + +static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + return is_enabling(vrr.enable, old_crtc_state, new_crtc_state); +} + +static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + return is_disabling(vrr.enable, old_crtc_state, new_crtc_state); } +#undef is_disabling +#undef is_enabling + static void intel_post_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -1196,6 +1041,11 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; + if (vrr_disabling(old_crtc_state, new_crtc_state)) { + intel_vrr_disable(old_crtc_state); + intel_crtc_update_active_timings(old_crtc_state, false); + } + intel_drrs_deactivate(old_crtc_state); intel_psr_pre_plane_update(state, crtc); @@ -1676,6 +1526,8 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta } intel_set_transcoder_timings(crtc_state); + if (HAS_VRR(dev_priv)) + intel_vrr_set_transcoder_timings(crtc_state); if (cpu_transcoder != TRANSCODER_EDP) intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder), @@ -1851,9 +1703,17 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_disable_shared_dpll(old_crtc_state); - intel_encoders_post_pll_disable(state, crtc); + if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { + struct intel_crtc *slave_crtc; + + intel_encoders_post_pll_disable(state, crtc); - intel_dmc_disable_pipe(i915, crtc->pipe); + intel_dmc_disable_pipe(i915, crtc->pipe); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) + intel_dmc_disable_pipe(i915, slave_crtc->pipe); + } } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -1907,7 +1767,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) if (IS_DG2(dev_priv)) /* DG2's "TC1" output uses a SNPS PHY */ return false; - else if (IS_ALDERLAKE_P(dev_priv)) + else if (IS_ALDERLAKE_P(dev_priv) || IS_METEORLAKE(dev_priv)) return phy >= PHY_F && phy <= PHY_I; else if (IS_TIGERLAKE(dev_priv)) return phy >= PHY_D && phy <= PHY_I; @@ -2214,30 +2074,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, i830_enable_pipe(dev_priv, pipe); } - -/* - * turn all crtc's off, but do not adjust state - * This has to be paired with a call to intel_modeset_setup_hw_state. 
- */ -int intel_display_suspend(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *state; - int ret; - - if (!HAS_DISPLAY(dev_priv)) - return 0; - - state = drm_atomic_helper_suspend(dev); - ret = PTR_ERR_OR_ZERO(state); - if (ret) - drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", - ret); - else - dev_priv->display.restore.modeset_state = state; - return ret; -} - void intel_encoder_destroy(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); @@ -2567,7 +2403,7 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 0x80000); } -static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) +void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) { /* * There may be no VBT; and if the BIOS enabled SSC we can @@ -2904,6 +2740,9 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); + if (crtc_state->wgc_enable) + val |= TRANSCONF_WGC_ENABLE; + val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); @@ -2923,6 +2762,7 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe; u32 tmp; if (!i9xx_has_pfit(dev_priv)) @@ -2933,13 +2773,13 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) return; /* Check whether the pfit is attached to our pipe. */ - if (DISPLAY_VER(dev_priv) < 4) { - if (crtc->pipe != PIPE_B) - return; - } else { - if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) - return; - } + if (DISPLAY_VER(dev_priv) >= 4) + pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp); + else + pipe = PIPE_B; + + if (pipe != crtc->pipe) + return; crtc_state->gmch_pfit.control = tmp; crtc_state->gmch_pfit.pgm_ratios = @@ -3061,6 +2901,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, return false; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + pipe_config->sink_format = pipe_config->output_format; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; @@ -3096,6 +2937,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + (tmp & TRANSCONF_WGC_ENABLE)) + pipe_config->wgc_enable = true; + if (IS_CHERRYVIEW(dev_priv)) pipe_config->cgm_mode = intel_de_read(dev_priv, CGM_PIPE_MODE(crtc->pipe)); @@ -3398,73 +3243,39 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } -static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, - u32 pos, u32 size) -{ - drm_rect_init(&crtc_state->pch_pfit.dst, - pos >> 16, pos & 0xffff, - size >> 16, size & 0xffff); -} - -static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; - int id = -1; - int i; - - /* find scaler attached to this pipe */ - for (i = 0; i < crtc->num_scalers; i++) { - u32 ctl, pos, size; - - ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); - if ((ctl & (PS_SCALER_EN | 
PS_PLANE_SEL_MASK)) != PS_SCALER_EN) - continue; - - id = i; - crtc_state->pch_pfit.enabled = true; - - pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); - size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); - - ilk_get_pfit_pos_size(crtc_state, pos, size); - - scaler_state->scalers[i].in_use = true; - break; - } - - scaler_state->scaler_id = id; - if (id >= 0) - scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); - else - scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); -} - static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 ctl, pos, size; + enum pipe pipe; ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); if ((ctl & PF_ENABLE) == 0) return; + if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) + pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl); + else + pipe = crtc->pipe; + crtc_state->pch_pfit.enabled = true; pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); - ilk_get_pfit_pos_size(crtc_state, pos, size); + drm_rect_init(&crtc_state->pch_pfit.dst, + REG_FIELD_GET(PF_WIN_XPOS_MASK, pos), + REG_FIELD_GET(PF_WIN_YPOS_MASK, pos), + REG_FIELD_GET(PF_WIN_XSIZE_MASK, size), + REG_FIELD_GET(PF_WIN_YSIZE_MASK, size)); /* * We currently do not free assignements of panel fitters on * ivb/hsw (since we don't use the higher upscaling modes which * differentiates them) so just WARN about this case for now. */ - drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 && - (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); + drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe); } static bool ilk_get_pipe_config(struct intel_crtc *crtc, @@ -3520,6 +3331,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, break; } + pipe_config->sink_format = pipe_config->output_format; + pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; @@ -3560,7 +3373,7 @@ static u8 bigjoiner_pipes(struct drm_i915_private *i915) else pipes = 0; - return pipes & RUNTIME_INFO(i915)->pipe_mask; + return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask; } static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, @@ -3901,7 +3714,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, intel_get_transcoder_timings(crtc, pipe_config); if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) - intel_vrr_get_config(crtc, pipe_config); + intel_vrr_get_config(pipe_config); intel_get_pipe_src_size(crtc, pipe_config); @@ -3918,6 +3731,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, bdw_get_pipe_misc_output_format(crtc); } + pipe_config->sink_format = pipe_config->output_format; + pipe_config->gamma_mode = intel_de_read(dev_priv, GAMMA_MODE(crtc->pipe)); @@ -3947,7 +3762,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { if (DISPLAY_VER(dev_priv) >= 9) - skl_get_pfit_config(pipe_config); + skl_scaler_get_config(pipe_config); else ilk_get_pfit_config(pipe_config); } @@ -3995,218 +3810,6 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) return true; } -/* VESA 640x480x72Hz mode to set on the pipe */ -static const struct drm_display_mode load_detect_mode = { - 
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, - 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), -}; - -static int intel_modeset_disable_planes(struct drm_atomic_state *state, - struct drm_crtc *crtc) -{ - struct drm_plane *plane; - struct drm_plane_state *plane_state; - int ret, i; - - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - return ret; - - for_each_new_plane_in_state(state, plane, plane_state, i) { - if (plane_state->crtc != crtc) - continue; - - ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); - if (ret) - return ret; - - drm_atomic_set_fb_for_plane(plane_state, NULL); - } - - return 0; -} - -int intel_get_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx) -{ - struct intel_encoder *encoder = - intel_attached_encoder(to_intel_connector(connector)); - struct intel_crtc *possible_crtc; - struct intel_crtc *crtc = NULL; - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_mode_config *config = &dev->mode_config; - struct drm_atomic_state *state = NULL, *restore_state = NULL; - struct drm_connector_state *connector_state; - struct intel_crtc_state *crtc_state; - int ret; - - drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", - connector->base.id, connector->name, - encoder->base.base.id, encoder->base.name); - - old->restore_state = NULL; - - drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); - - /* - * Algorithm gets a little messy: - * - * - if the connector already has an assigned crtc, use it (but make - * sure it's on first) - * - * - try to find the first unused crtc that can drive this connector, - * and use that if we find one - */ - - /* See if we already have a CRTC for this connector */ - if (connector->state->crtc) { - crtc = to_intel_crtc(connector->state->crtc); - - ret = drm_modeset_lock(&crtc->base.mutex, ctx); - if (ret) - goto fail; - - /* Make sure the crtc and connector are running */ - goto found; - } - - /* Find an unused one (if possible) */ - for_each_intel_crtc(dev, possible_crtc) { - if (!(encoder->base.possible_crtcs & - drm_crtc_mask(&possible_crtc->base))) - continue; - - ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx); - if (ret) - goto fail; - - if (possible_crtc->base.state->enable) { - drm_modeset_unlock(&possible_crtc->base.mutex); - continue; - } - - crtc = possible_crtc; - break; - } - - /* - * If we didn't find an unused CRTC, don't use any. 
- */ - if (!crtc) { - drm_dbg_kms(&dev_priv->drm, - "no pipe available for load-detect\n"); - ret = -ENODEV; - goto fail; - } - -found: - state = drm_atomic_state_alloc(dev); - restore_state = drm_atomic_state_alloc(dev); - if (!state || !restore_state) { - ret = -ENOMEM; - goto fail; - } - - state->acquire_ctx = ctx; - restore_state->acquire_ctx = ctx; - - connector_state = drm_atomic_get_connector_state(state, connector); - if (IS_ERR(connector_state)) { - ret = PTR_ERR(connector_state); - goto fail; - } - - ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base); - if (ret) - goto fail; - - crtc_state = intel_atomic_get_crtc_state(state, crtc); - if (IS_ERR(crtc_state)) { - ret = PTR_ERR(crtc_state); - goto fail; - } - - crtc_state->uapi.active = true; - - ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, - &load_detect_mode); - if (ret) - goto fail; - - ret = intel_modeset_disable_planes(state, &crtc->base); - if (ret) - goto fail; - - ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); - if (!ret) - ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base)); - if (!ret) - ret = drm_atomic_add_affected_planes(restore_state, &crtc->base); - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "Failed to create a copy of old state to restore: %i\n", - ret); - goto fail; - } - - ret = drm_atomic_commit(state); - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "failed to set mode on load-detect pipe\n"); - goto fail; - } - - old->restore_state = restore_state; - drm_atomic_state_put(state); - - /* let the connector get through one full cycle before testing */ - intel_crtc_wait_for_next_vblank(crtc); - - return true; - -fail: - if (state) { - drm_atomic_state_put(state); - state = NULL; - } - if (restore_state) { - drm_atomic_state_put(restore_state); - restore_state = NULL; - } - - if (ret == -EDEADLK) - return ret; - - return false; -} - -void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx) -{ - struct intel_encoder *intel_encoder = - intel_attached_encoder(to_intel_connector(connector)); - struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_atomic_state *state = old->restore_state; - int ret; - - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", - connector->base.id, connector->name, - encoder->base.id, encoder->name); - - if (!state) - return; - - ret = drm_atomic_helper_commit_duplicated_state(state, ctx); - if (ret) - drm_dbg_kms(&i915->drm, - "Couldn't release load detect pipe: %i\n", ret); - drm_atomic_state_put(state); -} - static int i9xx_pll_refclk(struct drm_device *dev, const struct intel_crtc_state *pipe_config) { @@ -5269,7 +4872,7 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, return; drm_dbg_kms(&dev_priv->drm, - "fastset mismatch in %s infoframe\n", name); + "fastset requirement not met in %s infoframe\n", name); drm_dbg_kms(&dev_priv->drm, "expected:\n"); hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); drm_dbg_kms(&dev_priv->drm, "found:\n"); @@ -5294,7 +4897,7 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, return; drm_dbg_kms(&dev_priv->drm, - "fastset mismatch in %s dp sdp\n", name); + "fastset requirement not met in %s dp sdp\n", name); drm_dbg_kms(&dev_priv->drm, "expected:\n"); drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); drm_dbg_kms(&dev_priv->drm, "found:\n"); @@ -5335,7 
+4938,7 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, len = memcmp_diff_len(a, b, len); drm_dbg_kms(&dev_priv->drm, - "fastset mismatch in %s buffer\n", name); + "fastset requirement not met in %s buffer\n", name); print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, 16, 0, a, len, false); print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, @@ -5366,7 +4969,7 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, if (fastset) drm_dbg_kms(&i915->drm, - "[CRTC:%d:%s] fastset mismatch in %s %pV\n", + "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", crtc->base.base.id, crtc->base.name, name, &vaf); else drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", @@ -5573,6 +5176,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) +#define PIPE_CONF_CHECK_CSC(name) do { \ + PIPE_CONF_CHECK_X(name.preoff[0]); \ + PIPE_CONF_CHECK_X(name.preoff[1]); \ + PIPE_CONF_CHECK_X(name.preoff[2]); \ + PIPE_CONF_CHECK_X(name.coeff[0]); \ + PIPE_CONF_CHECK_X(name.coeff[1]); \ + PIPE_CONF_CHECK_X(name.coeff[2]); \ + PIPE_CONF_CHECK_X(name.coeff[3]); \ + PIPE_CONF_CHECK_X(name.coeff[4]); \ + PIPE_CONF_CHECK_X(name.coeff[5]); \ + PIPE_CONF_CHECK_X(name.coeff[6]); \ + PIPE_CONF_CHECK_X(name.coeff[7]); \ + PIPE_CONF_CHECK_X(name.coeff[8]); \ + PIPE_CONF_CHECK_X(name.postoff[0]); \ + PIPE_CONF_CHECK_X(name.postoff[1]); \ + PIPE_CONF_CHECK_X(name.postoff[2]); \ +} while (0) + #define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) @@ -5663,6 +5284,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(csc_mode); PIPE_CONF_CHECK_BOOL(gamma_enable); PIPE_CONF_CHECK_BOOL(csc_enable); + PIPE_CONF_CHECK_BOOL(wgc_enable); PIPE_CONF_CHECK_I(linetime); PIPE_CONF_CHECK_I(ips_linetime); @@ -5670,6 +5292,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true); PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false); + PIPE_CONF_CHECK_CSC(csc); + PIPE_CONF_CHECK_CSC(output_csc); + if (current_config->active_planes) { PIPE_CONF_CHECK_BOOL(has_psr); PIPE_CONF_CHECK_BOOL(has_psr2); @@ -5756,7 +5381,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(splitter.link_count); PIPE_CONF_CHECK_I(splitter.pixel_overlap); - PIPE_CONF_CHECK_BOOL(vrr.enable); + if (!fastset) + PIPE_CONF_CHECK_BOOL(vrr.enable); PIPE_CONF_CHECK_I(vrr.vmin); PIPE_CONF_CHECK_I(vrr.vmax); PIPE_CONF_CHECK_I(vrr.flipline); @@ -5932,8 +5558,13 @@ static int intel_modeset_checks(struct intel_atomic_state *state) static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { - if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) + struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev); + + if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) { + drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n"); + return; + } new_crtc_state->uapi.mode_changed = false; if (!intel_crtc_needs_modeset(new_crtc_state)) @@ -6372,6 +6003,21 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in * this selectively if required. */ switch (new_plane_state->hw.fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + /* + * FIXME: Async on Linear buffer is supported on ICL as + * but with additional alignment and fbc restrictions + * need to be taken care of. 
These aren't applicable for + * gen12+. + */ + if (DISPLAY_VER(i915) < 12) { + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] Modifier does not support async flips\n", + plane->base.base.id, plane->base.name); + return -EINVAL; + } + break; + case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: @@ -6536,6 +6182,13 @@ int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + /* + * crtc's state no longer considered to be inherited + * after the first userspace/client initiated commit. + */ + if (!state->internal) + new_crtc_state->inherited = false; + if (new_crtc_state->inherited != old_crtc_state->inherited) new_crtc_state->uapi.mode_changed = true; @@ -6881,7 +6534,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state, if (!intel_crtc_needs_modeset(new_crtc_state)) return; - intel_crtc_update_active_timings(new_crtc_state); + /* VRR will be enable later, if required */ + intel_crtc_update_active_timings(new_crtc_state, false); dev_priv->display.funcs.display->crtc_enable(state, crtc); @@ -6908,6 +6562,12 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_dpt_configure(crtc); } + if (vrr_enabling(old_crtc_state, new_crtc_state)) { + intel_vrr_enable(new_crtc_state); + intel_crtc_update_active_timings(new_crtc_state, + new_crtc_state->vrr.enable); + } + if (!modeset) { if (new_crtc_state->preload_luts && intel_crtc_needs_color_update(new_crtc_state)) @@ -7181,7 +6841,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) drm_atomic_state_put(&state->base); } -static void intel_atomic_helper_free_state_worker(struct work_struct *work) +void intel_atomic_helper_free_state_worker(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), display.atomic_helper.free_work); @@ -7313,6 +6973,12 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) * 7. New _arm() registers are finally written * 8. Hardware finally latches a complete set of new * register values, and subsequent frames will be OK again + * + * Also note that due to the pipe CSC hardware issues on + * SKL/GLK DC states must remain off until the pipe CSC + * state readout has happened. Otherwise we risk corrupting + * the CSC latched register values with the readout (see + * skl_read_csc() and skl_color_commit_noarm()). 
*/ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); @@ -7523,9 +7189,8 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state) plane->frontbuffer_bit); } -static int intel_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *_state, - bool nonblock) +int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, + bool nonblock) { struct intel_atomic_state *state = to_intel_atomic_state(_state); struct drm_i915_private *dev_priv = to_i915(dev); @@ -7627,19 +7292,6 @@ void intel_plane_destroy(struct drm_plane *plane) kfree(to_intel_plane(plane)); } -static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) -{ - struct intel_plane *plane; - - for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, - plane->pipe); - - plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); - } -} - - int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { @@ -7719,7 +7371,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) return true; } -static void intel_setup_outputs(struct drm_i915_private *dev_priv) +void intel_setup_outputs(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; bool dpd_is_edp = false; @@ -7729,7 +7381,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_DG2(dev_priv)) { + if (IS_METEORLAKE(dev_priv)) { + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_TC1); + intel_ddi_init(dev_priv, PORT_TC2); + intel_ddi_init(dev_priv, PORT_TC3); + intel_ddi_init(dev_priv, PORT_TC4); + } else if (IS_DG2(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); @@ -7971,9 +7630,8 @@ static int max_dotclock(struct drm_i915_private *i915) return max_dotclock; } -static enum drm_mode_status -intel_mode_valid(struct drm_device *dev, - const struct drm_display_mode *mode) +enum drm_mode_status intel_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); int hdisplay_max, htotal_max; @@ -8113,18 +7771,6 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, return MODE_OK; } -static const struct drm_mode_config_funcs intel_mode_funcs = { - .fb_create = intel_user_framebuffer_create, - .get_format_info = intel_fb_get_format_info, - .output_poll_changed = intel_fbdev_output_poll_changed, - .mode_valid = intel_mode_valid, - .atomic_check = intel_atomic_check, - .atomic_commit = intel_atomic_commit, - .atomic_state_alloc = intel_atomic_state_alloc, - .atomic_state_clear = intel_atomic_state_clear, - .atomic_state_free = intel_atomic_state_free, -}; - static const struct intel_display_funcs skl_display_funcs = { .get_pipe_config = hsw_get_pipe_config, .crtc_enable = hsw_crtc_enable, @@ -8171,15 +7817,6 @@ static const struct intel_display_funcs i9xx_display_funcs = { */ void intel_init_display_hooks(struct drm_i915_private *dev_priv) { - if (!HAS_DISPLAY(dev_priv)) - return; - - intel_color_init_hooks(dev_priv); - intel_init_cdclk_hooks(dev_priv); - intel_audio_hooks_init(dev_priv); - - intel_dpll_init_clock_hook(dev_priv); - if (DISPLAY_VER(dev_priv) >= 9) { dev_priv->display.funcs.display = &skl_display_funcs; } else if (HAS_DDI(dev_priv)) { @@ -8192,25 +7829,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) } else { 
dev_priv->display.funcs.display = &i9xx_display_funcs; } - - intel_fdi_init_hook(dev_priv); -} - -void intel_modeset_init_hw(struct drm_i915_private *i915) -{ - struct intel_cdclk_state *cdclk_state; - - if (!HAS_DISPLAY(i915)) - return; - - cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); - - intel_update_cdclk(i915); - intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK"); - cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; } -static int intel_initial_commit(struct drm_device *dev) +int intel_initial_commit(struct drm_device *dev) { struct drm_atomic_state *state = NULL; struct drm_modeset_acquire_ctx ctx; @@ -8223,9 +7844,10 @@ static int intel_initial_commit(struct drm_device *dev) drm_modeset_acquire_init(&ctx, 0); -retry: state->acquire_ctx = &ctx; + to_intel_atomic_state(state)->internal = true; +retry: for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_crtc_state(state, crtc); @@ -8238,15 +7860,6 @@ retry: if (crtc_state->hw.active) { struct intel_encoder *encoder; - /* - * We've not yet detected sink capabilities - * (audio,infoframes,etc.) and thus we don't want to - * force a full state recomputation yet. We want that to - * happen only for the first real commit from userspace. - * So preserve the inherited flag for the time being. - */ - crtc_state->inherited = true; - ret = drm_atomic_add_affected_planes(state, &crtc->base); if (ret) goto out; @@ -8289,246 +7902,6 @@ out: return ret; } -static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = { - .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, -}; - -static void intel_mode_config_init(struct drm_i915_private *i915) -{ - struct drm_mode_config *mode_config = &i915->drm.mode_config; - - drm_mode_config_init(&i915->drm); - INIT_LIST_HEAD(&i915->display.global.obj_list); - - mode_config->min_width = 0; - mode_config->min_height = 0; - - mode_config->preferred_depth = 24; - mode_config->prefer_shadow = 1; - - mode_config->funcs = &intel_mode_funcs; - mode_config->helper_private = &intel_mode_config_funcs; - - mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915); - - /* - * Maximum framebuffer dimensions, chosen to match - * the maximum render engine surface size on gen4+. - */ - if (DISPLAY_VER(i915) >= 7) { - mode_config->max_width = 16384; - mode_config->max_height = 16384; - } else if (DISPLAY_VER(i915) >= 4) { - mode_config->max_width = 8192; - mode_config->max_height = 8192; - } else if (DISPLAY_VER(i915) == 3) { - mode_config->max_width = 4096; - mode_config->max_height = 4096; - } else { - mode_config->max_width = 2048; - mode_config->max_height = 2048; - } - - if (IS_I845G(i915) || IS_I865G(i915)) { - mode_config->cursor_width = IS_I845G(i915) ? 
64 : 512; - mode_config->cursor_height = 1023; - } else if (IS_I830(i915) || IS_I85X(i915) || - IS_I915G(i915) || IS_I915GM(i915)) { - mode_config->cursor_width = 64; - mode_config->cursor_height = 64; - } else { - mode_config->cursor_width = 256; - mode_config->cursor_height = 256; - } -} - -static void intel_mode_config_cleanup(struct drm_i915_private *i915) -{ - intel_atomic_global_obj_cleanup(i915); - drm_mode_config_cleanup(&i915->drm); -} - -/* part #1: call before irq install */ -int intel_modeset_init_noirq(struct drm_i915_private *i915) -{ - int ret; - - if (i915_inject_probe_failure(i915)) - return -ENODEV; - - if (HAS_DISPLAY(i915)) { - ret = drm_vblank_init(&i915->drm, - INTEL_NUM_PIPES(i915)); - if (ret) - return ret; - } - - intel_bios_init(i915); - - ret = intel_vga_register(i915); - if (ret) - goto cleanup_bios; - - /* FIXME: completely on the wrong abstraction layer */ - ret = intel_power_domains_init(i915); - if (ret < 0) - goto cleanup_vga; - - intel_power_domains_init_hw(i915, false); - - if (!HAS_DISPLAY(i915)) - return 0; - - intel_dmc_init(i915); - - i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); - i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | - WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); - - intel_mode_config_init(i915); - - ret = intel_cdclk_init(i915); - if (ret) - goto cleanup_vga_client_pw_domain_dmc; - - ret = intel_color_init(i915); - if (ret) - goto cleanup_vga_client_pw_domain_dmc; - - ret = intel_dbuf_init(i915); - if (ret) - goto cleanup_vga_client_pw_domain_dmc; - - ret = intel_bw_init(i915); - if (ret) - goto cleanup_vga_client_pw_domain_dmc; - - init_llist_head(&i915->display.atomic_helper.free_list); - INIT_WORK(&i915->display.atomic_helper.free_work, - intel_atomic_helper_free_state_worker); - - intel_init_quirks(i915); - - intel_fbc_init(i915); - - return 0; - -cleanup_vga_client_pw_domain_dmc: - intel_dmc_fini(i915); - intel_power_domains_driver_remove(i915); -cleanup_vga: - intel_vga_unregister(i915); -cleanup_bios: - intel_bios_driver_remove(i915); - - return ret; -} - -/* part #2: call after irq install, but before gem init */ -int intel_modeset_init_nogem(struct drm_i915_private *i915) -{ - struct drm_device *dev = &i915->drm; - enum pipe pipe; - struct intel_crtc *crtc; - int ret; - - if (!HAS_DISPLAY(i915)) - return 0; - - intel_wm_init(i915); - - intel_panel_sanitize_ssc(i915); - - intel_pps_setup(i915); - - intel_gmbus_setup(i915); - - drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n", - INTEL_NUM_PIPES(i915), - INTEL_NUM_PIPES(i915) > 1 ? 
"s" : ""); - - for_each_pipe(i915, pipe) { - ret = intel_crtc_init(i915, pipe); - if (ret) { - intel_mode_config_cleanup(i915); - return ret; - } - } - - intel_plane_possible_crtcs_init(i915); - intel_shared_dpll_init(i915); - intel_fdi_pll_freq_update(i915); - - intel_update_czclk(i915); - intel_modeset_init_hw(i915); - intel_dpll_update_ref_clks(i915); - - intel_hdcp_component_init(i915); - - if (i915->display.cdclk.max_cdclk_freq == 0) - intel_update_max_cdclk(i915); - - intel_hti_init(i915); - - /* Just disable it once at startup */ - intel_vga_disable(i915); - intel_setup_outputs(i915); - - drm_modeset_lock_all(dev); - intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx); - intel_acpi_assign_connector_fwnodes(i915); - drm_modeset_unlock_all(dev); - - for_each_intel_crtc(dev, crtc) { - if (!to_intel_crtc_state(crtc->base.state)->uapi.active) - continue; - intel_crtc_initial_plane_config(crtc); - } - - /* - * Make sure hardware watermarks really match the state we read out. - * Note that we need to do this after reconstructing the BIOS fb's - * since the watermark calculation done here will use pstate->fb. - */ - if (!HAS_GMCH(i915)) - ilk_wm_sanitize(i915); - - return 0; -} - -/* part #3: call after gem init */ -int intel_modeset_init(struct drm_i915_private *i915) -{ - int ret; - - if (!HAS_DISPLAY(i915)) - return 0; - - /* - * Force all active planes to recompute their states. So that on - * mode_setcrtc after probe, all the intel_plane_state variables - * are already calculated and there is no assert_plane warnings - * during bootup. - */ - ret = intel_initial_commit(&i915->drm); - if (ret) - drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); - - intel_overlay_setup(i915); - - ret = intel_fbdev_init(&i915->drm); - if (ret) - return ret; - - /* Only enable hotplug handling once the fbdev is fully set up. 
*/ - intel_hpd_init(i915); - intel_hpd_poll_disable(i915); - - skl_watermark_ipc_init(i915); - - return 0; -} - void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); @@ -8636,45 +8009,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) intel_de_posting_read(dev_priv, DPLL(pipe)); } -void intel_display_resume(struct drm_device *dev) -{ - struct drm_i915_private *i915 = to_i915(dev); - struct drm_atomic_state *state = i915->display.restore.modeset_state; - struct drm_modeset_acquire_ctx ctx; - int ret; - - if (!HAS_DISPLAY(i915)) - return; - - i915->display.restore.modeset_state = NULL; - if (state) - state->acquire_ctx = &ctx; - - drm_modeset_acquire_init(&ctx, 0); - - while (1) { - ret = drm_modeset_lock_all_ctx(dev, &ctx); - if (ret != -EDEADLK) - break; - - drm_modeset_backoff(&ctx); - } - - if (!ret) - ret = __intel_display_resume(i915, state, &ctx); - - skl_watermark_ipc_update(i915); - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - - if (ret) - drm_err(&i915->drm, - "Restoring old state failed with %i\n", ret); - if (state) - drm_atomic_state_put(state); -} - -static void intel_hpd_poll_fini(struct drm_i915_private *i915) +void intel_hpd_poll_fini(struct drm_i915_private *i915) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; @@ -8692,144 +8027,6 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915) drm_connector_list_iter_end(&conn_iter); } -/* part #1: call before irq uninstall */ -void intel_modeset_driver_remove(struct drm_i915_private *i915) -{ - if (!HAS_DISPLAY(i915)) - return; - - flush_workqueue(i915->display.wq.flip); - flush_workqueue(i915->display.wq.modeset); - - flush_work(&i915->display.atomic_helper.free_work); - drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list)); - - /* - * MST topology needs to be suspended so we don't have any calls to - * fbdev after it's finalized. MST will be destroyed later as part of - * drm_mode_config_cleanup() - */ - intel_dp_mst_suspend(i915); -} - -/* part #2: call after irq uninstall */ -void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) -{ - if (!HAS_DISPLAY(i915)) - return; - - /* - * Due to the hpd irq storm handling the hotplug work can re-arm the - * poll handlers. Hence disable polling after hpd handling is shut down. - */ - intel_hpd_poll_fini(i915); - - /* poll work can call into fbdev, hence clean that up afterwards */ - intel_fbdev_fini(i915); - - intel_unregister_dsm_handler(); - - /* flush any delayed tasks or pending work */ - flush_scheduled_work(); - - intel_hdcp_component_fini(i915); - - intel_mode_config_cleanup(i915); - - intel_overlay_cleanup(i915); - - intel_gmbus_teardown(i915); - - destroy_workqueue(i915->display.wq.flip); - destroy_workqueue(i915->display.wq.modeset); - - intel_fbc_cleanup(i915); -} - -/* part #3: call after gem init */ -void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) -{ - intel_dmc_fini(i915); - - intel_power_domains_driver_remove(i915); - - intel_vga_unregister(i915); - - intel_bios_driver_remove(i915); -} - -bool intel_modeset_probe_defer(struct pci_dev *pdev) -{ - struct drm_privacy_screen *privacy_screen; - - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. 
- */ - if (vga_switcheroo_client_probe_defer(pdev)) - return true; - - /* If the LCD panel has a privacy-screen, wait for it */ - privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL); - if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER) - return true; - - drm_privacy_screen_put(privacy_screen); - - return false; -} - -void intel_display_driver_register(struct drm_i915_private *i915) -{ - if (!HAS_DISPLAY(i915)) - return; - - /* Must be done after probing outputs */ - intel_opregion_register(i915); - intel_acpi_video_register(i915); - - intel_audio_init(i915); - - intel_display_debugfs_register(i915); - - /* - * Some ports require correctly set-up hpd registers for - * detection to work properly (leading to ghost connected - * connector status), e.g. VGA on gm45. Hence we can only set - * up the initial fbdev config after hpd irqs are fully - * enabled. We do it last so that the async config cannot run - * before the connectors are registered. - */ - intel_fbdev_initial_config_async(i915); - - /* - * We need to coordinate the hotplugs with the asynchronous - * fbdev configuration, for which we use the - * fbdev->async_cookie. - */ - drm_kms_helper_poll_init(&i915->drm); -} - -void intel_display_driver_unregister(struct drm_i915_private *i915) -{ - if (!HAS_DISPLAY(i915)) - return; - - intel_fbdev_unregister(i915); - intel_audio_deinit(i915); - - /* - * After flushing the fbdev (incl. a late async config which - * will have delayed queuing of a hotplug event), then flush - * the hotplug events. - */ - drm_kms_helper_poll_fini(&i915->drm); - drm_atomic_helper_shutdown(&i915->drm); - - acpi_video_unregister(); - intel_opregion_unregister(i915); -} - bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 287159bdeb0d..c744c021af23 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -56,13 +56,13 @@ struct intel_dp; struct intel_encoder; struct intel_initial_plane_config; struct intel_link_m_n; -struct intel_load_detect_pipe; struct intel_plane; struct intel_plane_state; struct intel_power_domain_mask; struct intel_remapped_info; struct intel_rotation_info; struct pci_dev; +struct work_struct; #define pipe_name(p) ((p) + 'A') @@ -105,7 +105,7 @@ enum i9xx_plane_id { }; #define plane_name(p) ((p) + 'A') -#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') +#define sprite_name(p, s) ((p) * DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') #define for_each_plane_id_on_crtc(__crtc, __p) \ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ @@ -113,7 +113,7 @@ enum i9xx_plane_id { #define for_each_dbuf_slice(__dev_priv, __slice) \ for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \ - for_each_if(INTEL_INFO(__dev_priv)->display.dbuf.slice_mask & BIT(__slice)) + for_each_if(INTEL_INFO(__dev_priv)->display->dbuf.slice_mask & BIT(__slice)) #define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \ for_each_dbuf_slice((__dev_priv), (__slice)) \ @@ -221,7 +221,7 @@ enum phy_fia { #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ - for_each_if(RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p)) + for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ 
for_each_pipe(__dev_priv, __p) \ @@ -229,7 +229,7 @@ enum phy_fia { #define for_each_cpu_transcoder(__dev_priv, __t) \ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \ - for_each_if (RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) + for_each_if (DISPLAY_RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ for_each_cpu_transcoder(__dev_priv, __t) \ @@ -237,7 +237,7 @@ enum phy_fia { #define for_each_sprite(__dev_priv, __p, __s) \ for ((__s) = 0; \ - (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \ + (__s) < DISPLAY_RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \ (__s)++) #define for_each_port(__port) \ @@ -407,6 +407,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, bool bigjoiner); enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); +bool is_trans_port_sync_master(const struct intel_crtc_state *state); bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state); u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state); @@ -437,7 +438,6 @@ void intel_add_fb_offsets(int *x, int *y, unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info); bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv); -int intel_display_suspend(struct drm_device *dev); void intel_encoder_destroy(struct drm_encoder *encoder); struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder); @@ -455,20 +455,12 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dig_port, unsigned int expected_mask); -int intel_get_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx); -void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx); struct drm_framebuffer * intel_framebuffer_create(struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); bool intel_fuzzy_clock_check(int clock1, int clock2); -void intel_display_prepare_reset(struct drm_i915_private *dev_priv); -void intel_display_finish_reset(struct drm_i915_private *dev_priv); void intel_zero_m_n(struct intel_link_m_n *m_n); void intel_set_m_n(struct drm_i915_private *i915, const struct intel_link_m_n *m_n, @@ -518,21 +510,9 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state, bool visible); void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state); -void intel_display_driver_register(struct drm_i915_private *i915); -void intel_display_driver_unregister(struct drm_i915_private *i915); - void intel_update_watermarks(struct drm_i915_private *i915); /* modesetting */ -bool intel_modeset_probe_defer(struct pci_dev *pdev); -void intel_modeset_init_hw(struct drm_i915_private *i915); -int intel_modeset_init_noirq(struct drm_i915_private *i915); -int intel_modeset_init_nogem(struct drm_i915_private *i915); -int intel_modeset_init(struct drm_i915_private *i915); -void intel_modeset_driver_remove(struct drm_i915_private *i915); -void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); -void 
intel_modeset_driver_remove_nogem(struct drm_i915_private *i915); -void intel_display_resume(struct drm_device *dev); int intel_modeset_all_pipes(struct intel_atomic_state *state, const char *reason); void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, @@ -540,30 +520,41 @@ void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, struct intel_power_domain_mask *domains); +/* interface for intel_display_driver.c */ +void intel_setup_outputs(struct drm_i915_private *i915); +int intel_initial_commit(struct drm_device *dev); +void intel_panel_sanitize_ssc(struct drm_i915_private *i915); +void intel_update_czclk(struct drm_i915_private *i915); +void intel_atomic_helper_free_state_worker(struct work_struct *work); +enum drm_mode_status intel_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode); +int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, + bool nonblock); + +void intel_hpd_poll_fini(struct drm_i915_private *i915); + /* modesetting asserts */ void assert_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, bool state); #define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true) #define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false) -/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and - * WARN_ON()) for hw state sanity checks to check for unexpected conditions - * which may not necessarily be a user visible problem. This will either - * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to - * enable distros and users to tailor their preferred amount of i915 abrt - * spam. +/* + * Use I915_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw state sanity + * checks to check for unexpected conditions which may not necessarily be a user + * visible problem. This will either WARN() or DRM_ERROR() depending on the + * verbose_state_checks module param, to enable distros and users to tailor + * their preferred amount of i915 abrt spam. */ -#define I915_STATE_WARN(condition, format...) ({ \ +#define I915_STATE_WARN(__i915, condition, format...) 
({ \ + struct drm_device *drm = &(__i915)->drm; \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - if (!WARN(i915_modparams.verbose_state_checks, format)) \ - DRM_ERROR(format); \ + if (!drm_WARN(drm, i915_modparams.verbose_state_checks, format)) \ + drm_err(drm, format); \ unlikely(__ret_warn_on); \ }) -#define I915_STATE_WARN_ON(x) \ - I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") - bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915); #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index e36f88a39b86..2209811eb29e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -30,7 +30,7 @@ struct drm_i915_private; struct drm_property; struct drm_property_blob; struct i915_audio_component; -struct i915_hdcp_comp_master; +struct i915_hdcp_arbiter; struct intel_atomic_state; struct intel_audio_funcs; struct intel_bios_encoder_data; @@ -395,7 +395,7 @@ struct intel_display { } gmbus; struct { - struct i915_hdcp_master *master; + struct i915_hdcp_arbiter *arbiter; bool comp_added; /* @@ -404,8 +404,8 @@ struct intel_display { * this is only populated post Meteorlake */ struct intel_hdcp_gsc_message *hdcp_message; - /* Mutex to protect the above hdcp component related values. */ - struct mutex comp_mutex; + /* Mutex to protect the above hdcp related values. */ + struct mutex hdcp_mutex; } hdcp; struct { diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 45113ae107ba..2a4df62692a6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -12,6 +12,7 @@ #include "i915_debugfs.h" #include "i915_irq.h" #include "i915_reg.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_crtc_state_dump.h" #include "intel_display_debugfs.h" @@ -30,7 +31,6 @@ #include "intel_panel.h" #include "intel_psr.h" #include "intel_psr_regs.h" -#include "intel_sprite.h" #include "intel_wm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) @@ -237,28 +237,26 @@ static void intel_dp_info(struct seq_file *m, seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", - str_yes_no(intel_dp->has_audio)); + str_yes_no(intel_connector->base.display_info.has_audio)); drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, edid ? 
edid->data : NULL, &intel_dp->aux); } static void intel_dp_mst_info(struct seq_file *m, - struct intel_connector *intel_connector) + struct intel_connector *connector) { - bool has_audio = intel_connector->port->has_audio; + bool has_audio = connector->base.display_info.has_audio; seq_printf(m, "\taudio support: %s\n", str_yes_no(has_audio)); } static void intel_hdmi_info(struct seq_file *m, - struct intel_connector *intel_connector) + struct intel_connector *connector) { - struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); + bool has_audio = connector->base.display_info.has_audio; - seq_printf(m, "\taudio support: %s\n", - str_yes_no(intel_hdmi->has_audio)); + seq_printf(m, "\taudio support: %s\n", str_yes_no(has_audio)); } static void intel_connector_info(struct seq_file *m, @@ -1094,6 +1092,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); + intel_cdclk_debugfs_register(i915); intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); intel_hpd_debugfs_register(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c new file mode 100644 index 000000000000..464df1764a86 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/i915_pciids.h> +#include <drm/drm_color_mgmt.h> +#include <linux/pci.h> + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_display_device.h" +#include "intel_display_power.h" +#include "intel_display_reg_defs.h" +#include "intel_fbc.h" + +__diag_push(); +__diag_ignore_all("-Woverride-init", "Allow overriding inherited members"); + +static const struct intel_display_device_info no_display = {}; + +#define PIPE_A_OFFSET 0x70000 +#define PIPE_B_OFFSET 0x71000 +#define PIPE_C_OFFSET 0x72000 +#define PIPE_D_OFFSET 0x73000 +#define CHV_PIPE_C_OFFSET 0x74000 +/* + * There's actually no pipe EDP. Some pipe registers have + * simply shifted from the pipe to the transcoder, while + * keeping their original offset. Thus we need PIPE_EDP_OFFSET + * to access such registers in transcoder EDP. 
+ */ +#define PIPE_EDP_OFFSET 0x7f000 + +/* ICL DSI 0 and 1 */ +#define PIPE_DSI0_OFFSET 0x7b000 +#define PIPE_DSI1_OFFSET 0x7b800 + +#define TRANSCODER_A_OFFSET 0x60000 +#define TRANSCODER_B_OFFSET 0x61000 +#define TRANSCODER_C_OFFSET 0x62000 +#define CHV_TRANSCODER_C_OFFSET 0x63000 +#define TRANSCODER_D_OFFSET 0x63000 +#define TRANSCODER_EDP_OFFSET 0x6f000 +#define TRANSCODER_DSI0_OFFSET 0x6b000 +#define TRANSCODER_DSI1_OFFSET 0x6b800 + +#define CURSOR_A_OFFSET 0x70080 +#define CURSOR_B_OFFSET 0x700c0 +#define CHV_CURSOR_C_OFFSET 0x700e0 +#define IVB_CURSOR_B_OFFSET 0x71080 +#define IVB_CURSOR_C_OFFSET 0x72080 +#define TGL_CURSOR_D_OFFSET 0x73080 + +#define I845_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + } + +#define I9XX_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + } + +#define IVB_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + } + +#define HSW_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ + } + +#define CHV_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \ + } + +#define I845_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + } + +#define I9XX_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = CURSOR_B_OFFSET, \ + } + +#define CHV_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = CURSOR_B_OFFSET, \ + [PIPE_C] = CHV_CURSOR_C_OFFSET, \ + } + +#define IVB_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = IVB_CURSOR_B_OFFSET, \ + [PIPE_C] = IVB_CURSOR_C_OFFSET, \ + } + +#define TGL_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = IVB_CURSOR_B_OFFSET, \ + [PIPE_C] = IVB_CURSOR_C_OFFSET, \ + [PIPE_D] = TGL_CURSOR_D_OFFSET, \ + } + +#define I845_COLORS \ + .color = { .gamma_lut_size = 256 } +#define I9XX_COLORS \ + .color = { .gamma_lut_size = 129, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } +#define ILK_COLORS \ + .color = { .gamma_lut_size = 1024 } +#define IVB_COLORS \ + .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 } +#define CHV_COLORS \ + .color = { \ + .degamma_lut_size = 65, .gamma_lut_size = 257, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } +#define GLK_COLORS \ + .color = { \ + .degamma_lut_size = 33, .gamma_lut_size = 1024, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + 
DRM_COLOR_LUT_EQUAL_CHANNELS, \ + } +#define ICL_COLORS \ + .color = { \ + .degamma_lut_size = 33, .gamma_lut_size = 262145, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } + +#define I830_DISPLAY \ + .has_overlay = 1, \ + .cursor_needs_physical = 1, \ + .overlay_needs_physical = 1, \ + .has_gmch = 1, \ + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + I9XX_COLORS, \ + \ + .__runtime_defaults.ip.ver = 2, \ + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime_defaults.cpu_transcoder_mask = \ + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) + +static const struct intel_display_device_info i830_display = { + I830_DISPLAY, +}; + +#define I845_DISPLAY \ + .has_overlay = 1, \ + .overlay_needs_physical = 1, \ + .has_gmch = 1, \ + I845_PIPE_OFFSETS, \ + I845_CURSOR_OFFSETS, \ + I845_COLORS, \ + \ + .__runtime_defaults.ip.ver = 2, \ + .__runtime_defaults.pipe_mask = BIT(PIPE_A), \ + .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) + +static const struct intel_display_device_info i845_display = { + I845_DISPLAY, +}; + +static const struct intel_display_device_info i85x_display = { + I830_DISPLAY, + + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info i865g_display = { + I845_DISPLAY, + + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +#define GEN3_DISPLAY \ + .has_gmch = 1, \ + .has_overlay = 1, \ + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + I9XX_COLORS, \ + \ + .__runtime_defaults.ip.ver = 3, \ + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime_defaults.cpu_transcoder_mask = \ + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) + +static const struct intel_display_device_info i915g_display = { + GEN3_DISPLAY, + .cursor_needs_physical = 1, + .overlay_needs_physical = 1, +}; + +static const struct intel_display_device_info i915gm_display = { + GEN3_DISPLAY, + .cursor_needs_physical = 1, + .overlay_needs_physical = 1, + .supports_tv = 1, + + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info i945g_display = { + GEN3_DISPLAY, + .has_hotplug = 1, + .cursor_needs_physical = 1, + .overlay_needs_physical = 1, +}; + +static const struct intel_display_device_info i945gm_display = { + GEN3_DISPLAY, + .has_hotplug = 1, + .cursor_needs_physical = 1, + .overlay_needs_physical = 1, + .supports_tv = 1, + + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info g33_display = { + GEN3_DISPLAY, + .has_hotplug = 1, +}; + +#define GEN4_DISPLAY \ + .has_hotplug = 1, \ + .has_gmch = 1, \ + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + I9XX_COLORS, \ + \ + .__runtime_defaults.ip.ver = 4, \ + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime_defaults.cpu_transcoder_mask = \ + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) + +static const struct intel_display_device_info i965g_display = { + GEN4_DISPLAY, + .has_overlay = 1, +}; + +static const struct intel_display_device_info i965gm_display = { + GEN4_DISPLAY, + .has_overlay = 1, + .supports_tv = 1, + + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info g45_display = { + GEN4_DISPLAY, +}; + +static const struct intel_display_device_info gm45_display = { + GEN4_DISPLAY, + .supports_tv = 1, + + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +#define ILK_DISPLAY \ + .has_hotplug = 1, \ + I9XX_PIPE_OFFSETS, 
\ + I9XX_CURSOR_OFFSETS, \ + ILK_COLORS, \ + \ + .__runtime_defaults.ip.ver = 5, \ + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime_defaults.cpu_transcoder_mask = \ + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) + +static const struct intel_display_device_info ilk_d_display = { + ILK_DISPLAY, +}; + +static const struct intel_display_device_info ilk_m_display = { + ILK_DISPLAY, + + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info snb_display = { + .has_hotplug = 1, + I9XX_PIPE_OFFSETS, + I9XX_CURSOR_OFFSETS, + ILK_COLORS, + + .__runtime_defaults.ip.ver = 6, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info ivb_display = { + .has_hotplug = 1, + IVB_PIPE_OFFSETS, + IVB_CURSOR_OFFSETS, + IVB_COLORS, + + .__runtime_defaults.ip.ver = 7, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info vlv_display = { + .has_gmch = 1, + .has_hotplug = 1, + .mmio_offset = VLV_DISPLAY_BASE, + I9XX_PIPE_OFFSETS, + I9XX_CURSOR_OFFSETS, + I9XX_COLORS, + + .__runtime_defaults.ip.ver = 7, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B), +}; + +static const struct intel_display_device_info hsw_display = { + .has_ddi = 1, + .has_dp_mst = 1, + .has_fpga_dbg = 1, + .has_hotplug = 1, + HSW_PIPE_OFFSETS, + IVB_CURSOR_OFFSETS, + IVB_COLORS, + + .__runtime_defaults.ip.ver = 7, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info bdw_display = { + .has_ddi = 1, + .has_dp_mst = 1, + .has_fpga_dbg = 1, + .has_hotplug = 1, + HSW_PIPE_OFFSETS, + IVB_CURSOR_OFFSETS, + IVB_COLORS, + + .__runtime_defaults.ip.ver = 8, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +static const struct intel_display_device_info chv_display = { + .has_hotplug = 1, + .has_gmch = 1, + .mmio_offset = VLV_DISPLAY_BASE, + CHV_PIPE_OFFSETS, + CHV_CURSOR_OFFSETS, + CHV_COLORS, + + .__runtime_defaults.ip.ver = 8, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), +}; + +static const struct intel_display_device_info skl_display = { + .dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ + .dbuf.slice_mask = BIT(DBUF_S1), + .has_ddi = 1, + .has_dp_mst = 1, + .has_fpga_dbg = 1, + .has_hotplug = 1, + .has_ipc = 1, + .has_psr = 1, + .has_psr_hw_tracking = 1, + HSW_PIPE_OFFSETS, + IVB_CURSOR_OFFSETS, + IVB_COLORS, + + .__runtime_defaults.ip.ver = 9, + .__runtime_defaults.has_dmc = 1, + .__runtime_defaults.has_hdcp = 1, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | 
BIT(PIPE_C), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +#define GEN9_LP_DISPLAY \ + .dbuf.slice_mask = BIT(DBUF_S1), \ + .has_dp_mst = 1, \ + .has_ddi = 1, \ + .has_fpga_dbg = 1, \ + .has_hotplug = 1, \ + .has_ipc = 1, \ + .has_psr = 1, \ + .has_psr_hw_tracking = 1, \ + HSW_PIPE_OFFSETS, \ + IVB_CURSOR_OFFSETS, \ + IVB_COLORS, \ + \ + .__runtime_defaults.has_dmc = 1, \ + .__runtime_defaults.has_hdcp = 1, \ + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .__runtime_defaults.cpu_transcoder_mask = \ + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ + BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C) + +static const struct intel_display_device_info bxt_display = { + GEN9_LP_DISPLAY, + .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */ + + .__runtime_defaults.ip.ver = 9, +}; + +static const struct intel_display_device_info glk_display = { + GEN9_LP_DISPLAY, + .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ + GLK_COLORS, + + .__runtime_defaults.ip.ver = 10, +}; + +static const struct intel_display_device_info gen11_display = { + .abox_mask = BIT(0), + .dbuf.size = 2048, + .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), + .has_ddi = 1, + .has_dp_mst = 1, + .has_fpga_dbg = 1, + .has_hotplug = 1, + .has_ipc = 1, + .has_psr = 1, + .has_psr_hw_tracking = 1, + .pipe_offsets = { + [TRANSCODER_A] = PIPE_A_OFFSET, + [TRANSCODER_B] = PIPE_B_OFFSET, + [TRANSCODER_C] = PIPE_C_OFFSET, + [TRANSCODER_EDP] = PIPE_EDP_OFFSET, + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, + }, + .trans_offsets = { + [TRANSCODER_A] = TRANSCODER_A_OFFSET, + [TRANSCODER_B] = TRANSCODER_B_OFFSET, + [TRANSCODER_C] = TRANSCODER_C_OFFSET, + [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, + }, + IVB_CURSOR_OFFSETS, + ICL_COLORS, + + .__runtime_defaults.ip.ver = 11, + .__runtime_defaults.has_dmc = 1, + .__runtime_defaults.has_dsc = 1, + .__runtime_defaults.has_hdcp = 1, + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +}; + +#define XE_D_DISPLAY \ + .abox_mask = GENMASK(2, 1), \ + .dbuf.size = 2048, \ + .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ + .has_ddi = 1, \ + .has_dp_mst = 1, \ + .has_dsb = 1, \ + .has_fpga_dbg = 1, \ + .has_hotplug = 1, \ + .has_ipc = 1, \ + .has_psr = 1, \ + .has_psr_hw_tracking = 1, \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_D] = PIPE_D_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ + }, \ + TGL_CURSOR_OFFSETS, \ + ICL_COLORS, \ + \ + .__runtime_defaults.ip.ver = 12, \ + .__runtime_defaults.has_dmc = 1, \ 
+ .__runtime_defaults.has_dsc = 1, \ + .__runtime_defaults.has_hdcp = 1, \ + .__runtime_defaults.pipe_mask = \ + BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .__runtime_defaults.cpu_transcoder_mask = \ + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) + +static const struct intel_display_device_info tgl_display = { + XE_D_DISPLAY, +}; + +static const struct intel_display_device_info rkl_display = { + XE_D_DISPLAY, + .abox_mask = BIT(0), + .has_hti = 1, + .has_psr_hw_tracking = 0, + + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), +}; + +static const struct intel_display_device_info adl_s_display = { + XE_D_DISPLAY, + .has_hti = 1, + .has_psr_hw_tracking = 0, +}; + +#define XE_LPD_FEATURES \ + .abox_mask = GENMASK(1, 0), \ + .color = { \ + .degamma_lut_size = 129, .gamma_lut_size = 1024, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + }, \ + .dbuf.size = 4096, \ + .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ + BIT(DBUF_S4), \ + .has_ddi = 1, \ + .has_dp_mst = 1, \ + .has_dsb = 1, \ + .has_fpga_dbg = 1, \ + .has_hotplug = 1, \ + .has_ipc = 1, \ + .has_psr = 1, \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_D] = PIPE_D_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ + }, \ + TGL_CURSOR_OFFSETS, \ + \ + .__runtime_defaults.ip.ver = 13, \ + .__runtime_defaults.has_dmc = 1, \ + .__runtime_defaults.has_dsc = 1, \ + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime_defaults.has_hdcp = 1, \ + .__runtime_defaults.pipe_mask = \ + BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D) + +static const struct intel_display_device_info xe_lpd_display = { + XE_LPD_FEATURES, + .has_cdclk_crawl = 1, + .has_psr_hw_tracking = 0, + + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), +}; + +static const struct intel_display_device_info xe_hpd_display = { + XE_LPD_FEATURES, + .has_cdclk_squash = 1, + + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D), +}; + +static const struct intel_display_device_info xe_lpdp_display = { + XE_LPD_FEATURES, + .has_cdclk_crawl = 1, + .has_cdclk_squash = 1, + + .__runtime_defaults.ip.ver = 14, + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B), + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D), +}; + +__diag_pop(); + +#undef INTEL_VGA_DEVICE +#undef INTEL_QUANTA_VGA_DEVICE +#define INTEL_VGA_DEVICE(id, info) { id, info } +#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info } + +static const struct { + u32 devid; + const struct intel_display_device_info *info; +} intel_display_ids[] = { + 
INTEL_I830_IDS(&i830_display), + INTEL_I845G_IDS(&i845_display), + INTEL_I85X_IDS(&i85x_display), + INTEL_I865G_IDS(&i865g_display), + INTEL_I915G_IDS(&i915g_display), + INTEL_I915GM_IDS(&i915gm_display), + INTEL_I945G_IDS(&i945g_display), + INTEL_I945GM_IDS(&i945gm_display), + INTEL_I965G_IDS(&i965g_display), + INTEL_G33_IDS(&g33_display), + INTEL_I965GM_IDS(&i965gm_display), + INTEL_GM45_IDS(&gm45_display), + INTEL_G45_IDS(&g45_display), + INTEL_PINEVIEW_G_IDS(&g33_display), + INTEL_PINEVIEW_M_IDS(&g33_display), + INTEL_IRONLAKE_D_IDS(&ilk_d_display), + INTEL_IRONLAKE_M_IDS(&ilk_m_display), + INTEL_SNB_D_IDS(&snb_display), + INTEL_SNB_M_IDS(&snb_display), + INTEL_IVB_Q_IDS(NULL), /* must be first IVB in list */ + INTEL_IVB_M_IDS(&ivb_display), + INTEL_IVB_D_IDS(&ivb_display), + INTEL_HSW_IDS(&hsw_display), + INTEL_VLV_IDS(&vlv_display), + INTEL_BDW_IDS(&bdw_display), + INTEL_CHV_IDS(&chv_display), + INTEL_SKL_IDS(&skl_display), + INTEL_BXT_IDS(&bxt_display), + INTEL_GLK_IDS(&glk_display), + INTEL_KBL_IDS(&skl_display), + INTEL_CFL_IDS(&skl_display), + INTEL_ICL_11_IDS(&gen11_display), + INTEL_EHL_IDS(&gen11_display), + INTEL_JSL_IDS(&gen11_display), + INTEL_TGL_12_IDS(&tgl_display), + INTEL_DG1_IDS(&tgl_display), + INTEL_RKL_IDS(&rkl_display), + INTEL_ADLS_IDS(&adl_s_display), + INTEL_RPLS_IDS(&adl_s_display), + INTEL_ADLP_IDS(&xe_lpd_display), + INTEL_ADLN_IDS(&xe_lpd_display), + INTEL_RPLP_IDS(&xe_lpd_display), + INTEL_DG2_IDS(&xe_hpd_display), + + /* + * Do not add any GMD_ID-based platforms to this list. They will + * be probed automatically based on the IP version reported by + * the hardware. + */ +}; + +static const struct { + u16 ver; + u16 rel; + const struct intel_display_device_info *display; +} gmdid_display_map[] = { + { 14, 0, &xe_lpdp_display }, +}; + +static const struct intel_display_device_info * +probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step) +{ + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + void __iomem *addr; + u32 val; + int i; + + addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32)); + if (!addr) { + drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n"); + return &no_display; + } + + val = ioread32(addr); + pci_iounmap(pdev, addr); + + if (val == 0) + /* Platform doesn't have display */ + return &no_display; + + *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val); + *rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val); + *step = REG_FIELD_GET(GMD_ID_STEP, val); + + for (i = 0; i < ARRAY_SIZE(gmdid_display_map); i++) + if (*ver == gmdid_display_map[i].ver && + *rel == gmdid_display_map[i].rel) + return gmdid_display_map[i].display; + + drm_err(&i915->drm, "Unrecognized display IP version %d.%02d; disabling display.\n", + *ver, *rel); + return &no_display; +} + +const struct intel_display_device_info * +intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid, + u16 *gmdid_ver, u16 *gmdid_rel, u16 *gmdid_step) +{ + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + int i; + + if (has_gmdid) + return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step); + + for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) { + if (intel_display_ids[i].devid == pdev->device) + return intel_display_ids[i].info; + } + + drm_dbg(&i915->drm, "No display ID found for device ID %04x; disabling display.\n", + pdev->device); + + return &no_display; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h new file mode 100644 index 
000000000000..2aa82cbdf1c5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_DEVICE_H__ +#define __INTEL_DISPLAY_DEVICE_H__ + +#include <linux/types.h> + +#include "display/intel_display_limits.h" + +struct drm_i915_private; + +#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \ + /* Keep in alphabetical order */ \ + func(cursor_needs_physical); \ + func(has_cdclk_crawl); \ + func(has_cdclk_squash); \ + func(has_ddi); \ + func(has_dp_mst); \ + func(has_dsb); \ + func(has_fpga_dbg); \ + func(has_gmch); \ + func(has_hotplug); \ + func(has_hti); \ + func(has_ipc); \ + func(has_overlay); \ + func(has_psr); \ + func(has_psr_hw_tracking); \ + func(overlay_needs_physical); \ + func(supports_tv); + +#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) +#define HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl) +#define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash) +#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && DISPLAY_VER(i915) >= 7) +#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915)) +#define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi) +#define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0) +#define HAS_DMC(i915) (DISPLAY_RUNTIME_INFO(i915)->has_dmc) +#define HAS_DOUBLE_BUFFERED_M_N(i915) (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915)) +#define HAS_DP_MST(i915) (DISPLAY_INFO(i915)->has_dp_mst) +#define HAS_DP20(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14) +#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13) +#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb) +#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc) +#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0) +#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg) +#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2) +#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4) +#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915)) +#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch) +#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915)) +#define HAS_IPC(i915) (DISPLAY_INFO(i915)->has_ipc) +#define HAS_IPS(i915) (IS_HSW_ULT(i915) || IS_BROADWELL(i915)) +#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10)) +#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) +#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12) +#define HAS_OVERLAY(i915) (DISPLAY_INFO(i915)->has_overlay) +#define HAS_PSR(i915) (DISPLAY_INFO(i915)->has_psr) +#define HAS_PSR_HW_TRACKING(i915) (DISPLAY_INFO(i915)->has_psr_hw_tracking) +#define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12) +#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_LP(i915)) +#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \ + BIT(trans)) != 0) +#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) +#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask)) +#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug) +#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical) +#define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv) + +struct intel_display_runtime_info { + struct { + u16 ver; + u16 rel; + u16 step; + } ip; + + u8 pipe_mask; + u8 cpu_transcoder_mask; + + u8 num_sprites[I915_MAX_PIPES]; + u8 num_scalers[I915_MAX_PIPES]; + + u8 fbc_mask; + + bool has_hdcp; + bool has_dmc; + 
bool has_dsc; +}; + +struct intel_display_device_info { + /* Initial runtime info. */ + const struct intel_display_runtime_info __runtime_defaults; + + u8 abox_mask; + + struct { + u16 size; /* in blocks */ + u8 slice_mask; + } dbuf; + +#define DEFINE_FLAG(name) u8 name:1 + DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); +#undef DEFINE_FLAG + + /* Global register offset for the display engine */ + u32 mmio_offset; + + /* Register offsets for the various display pipes and transcoders */ + u32 pipe_offsets[I915_MAX_TRANSCODERS]; + u32 trans_offsets[I915_MAX_TRANSCODERS]; + u32 cursor_offsets[I915_MAX_PIPES]; + + struct { + u32 degamma_lut_size; + u32 gamma_lut_size; + u32 degamma_lut_tests; + u32 gamma_lut_tests; + } color; +}; + +const struct intel_display_device_info * +intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid, + u16 *ver, u16 *rel, u16 *step); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c new file mode 100644 index 000000000000..60ce10fc7205 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022-2023 Intel Corporation + * + * High level display driver entry points. This is a layer between top level + * driver code and low level display functionality; no low level display code or + * details here. + */ + +#include <linux/vga_switcheroo.h> +#include <acpi/video.h> +#include <drm/display/drm_dp_mst_helper.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_mode_config.h> +#include <drm/drm_privacy_screen_consumer.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "i915_drv.h" +#include "i9xx_wm.h" +#include "intel_acpi.h" +#include "intel_atomic.h" +#include "intel_audio.h" +#include "intel_bios.h" +#include "intel_bw.h" +#include "intel_cdclk.h" +#include "intel_color.h" +#include "intel_crtc.h" +#include "intel_display_debugfs.h" +#include "intel_display_driver.h" +#include "intel_display_power.h" +#include "intel_display_types.h" +#include "intel_dkl_phy.h" +#include "intel_dmc.h" +#include "intel_dp.h" +#include "intel_dpll.h" +#include "intel_dpll_mgr.h" +#include "intel_fb.h" +#include "intel_fbc.h" +#include "intel_fbdev.h" +#include "intel_fdi.h" +#include "intel_gmbus.h" +#include "intel_hdcp.h" +#include "intel_hotplug.h" +#include "intel_hti.h" +#include "intel_modeset_setup.h" +#include "intel_opregion.h" +#include "intel_overlay.h" +#include "intel_plane_initial.h" +#include "intel_pps.h" +#include "intel_quirks.h" +#include "intel_vga.h" +#include "intel_wm.h" +#include "skl_watermark.h" + +bool intel_display_driver_probe_defer(struct pci_dev *pdev) +{ + struct drm_privacy_screen *privacy_screen; + + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. 
+ */ + if (vga_switcheroo_client_probe_defer(pdev)) + return true; + + /* If the LCD panel has a privacy-screen, wait for it */ + privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL); + if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER) + return true; + + drm_privacy_screen_put(privacy_screen); + + return false; +} + +void intel_display_driver_init_hw(struct drm_i915_private *i915) +{ + struct intel_cdclk_state *cdclk_state; + + if (!HAS_DISPLAY(i915)) + return; + + cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); + + intel_update_cdclk(i915); + intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK"); + cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; +} + +static const struct drm_mode_config_funcs intel_mode_funcs = { + .fb_create = intel_user_framebuffer_create, + .get_format_info = intel_fb_get_format_info, + .output_poll_changed = intel_fbdev_output_poll_changed, + .mode_valid = intel_mode_valid, + .atomic_check = intel_atomic_check, + .atomic_commit = intel_atomic_commit, + .atomic_state_alloc = intel_atomic_state_alloc, + .atomic_state_clear = intel_atomic_state_clear, + .atomic_state_free = intel_atomic_state_free, +}; + +static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = { + .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, +}; + +static void intel_mode_config_init(struct drm_i915_private *i915) +{ + struct drm_mode_config *mode_config = &i915->drm.mode_config; + + drm_mode_config_init(&i915->drm); + INIT_LIST_HEAD(&i915->display.global.obj_list); + + mode_config->min_width = 0; + mode_config->min_height = 0; + + mode_config->preferred_depth = 24; + mode_config->prefer_shadow = 1; + + mode_config->funcs = &intel_mode_funcs; + mode_config->helper_private = &intel_mode_config_funcs; + + mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915); + + /* + * Maximum framebuffer dimensions, chosen to match + * the maximum render engine surface size on gen4+. + */ + if (DISPLAY_VER(i915) >= 7) { + mode_config->max_width = 16384; + mode_config->max_height = 16384; + } else if (DISPLAY_VER(i915) >= 4) { + mode_config->max_width = 8192; + mode_config->max_height = 8192; + } else if (DISPLAY_VER(i915) == 3) { + mode_config->max_width = 4096; + mode_config->max_height = 4096; + } else { + mode_config->max_width = 2048; + mode_config->max_height = 2048; + } + + if (IS_I845G(i915) || IS_I865G(i915)) { + mode_config->cursor_width = IS_I845G(i915) ? 
64 : 512; + mode_config->cursor_height = 1023; + } else if (IS_I830(i915) || IS_I85X(i915) || + IS_I915G(i915) || IS_I915GM(i915)) { + mode_config->cursor_width = 64; + mode_config->cursor_height = 64; + } else { + mode_config->cursor_width = 256; + mode_config->cursor_height = 256; + } +} + +static void intel_mode_config_cleanup(struct drm_i915_private *i915) +{ + intel_atomic_global_obj_cleanup(i915); + drm_mode_config_cleanup(&i915->drm); +} + +static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) +{ + struct intel_plane *plane; + + for_each_intel_plane(&dev_priv->drm, plane) { + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, + plane->pipe); + + plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); + } +} + +void intel_display_driver_early_probe(struct drm_i915_private *i915) +{ + if (!HAS_DISPLAY(i915)) + return; + + intel_dkl_phy_init(i915); + intel_color_init_hooks(i915); + intel_init_cdclk_hooks(i915); + intel_audio_hooks_init(i915); + intel_dpll_init_clock_hook(i915); + intel_init_display_hooks(i915); + intel_fdi_init_hook(i915); +} + +/* part #1: call before irq install */ +int intel_display_driver_probe_noirq(struct drm_i915_private *i915) +{ + int ret; + + if (i915_inject_probe_failure(i915)) + return -ENODEV; + + if (HAS_DISPLAY(i915)) { + ret = drm_vblank_init(&i915->drm, + INTEL_NUM_PIPES(i915)); + if (ret) + return ret; + } + + intel_bios_init(i915); + + ret = intel_vga_register(i915); + if (ret) + goto cleanup_bios; + + /* FIXME: completely on the wrong abstraction layer */ + ret = intel_power_domains_init(i915); + if (ret < 0) + goto cleanup_vga; + + intel_power_domains_init_hw(i915, false); + + if (!HAS_DISPLAY(i915)) + return 0; + + intel_dmc_init(i915); + + i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); + i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | + WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); + + intel_mode_config_init(i915); + + ret = intel_cdclk_init(i915); + if (ret) + goto cleanup_vga_client_pw_domain_dmc; + + ret = intel_color_init(i915); + if (ret) + goto cleanup_vga_client_pw_domain_dmc; + + ret = intel_dbuf_init(i915); + if (ret) + goto cleanup_vga_client_pw_domain_dmc; + + ret = intel_bw_init(i915); + if (ret) + goto cleanup_vga_client_pw_domain_dmc; + + init_llist_head(&i915->display.atomic_helper.free_list); + INIT_WORK(&i915->display.atomic_helper.free_work, + intel_atomic_helper_free_state_worker); + + intel_init_quirks(i915); + + intel_fbc_init(i915); + + return 0; + +cleanup_vga_client_pw_domain_dmc: + intel_dmc_fini(i915); + intel_power_domains_driver_remove(i915); +cleanup_vga: + intel_vga_unregister(i915); +cleanup_bios: + intel_bios_driver_remove(i915); + + return ret; +} + +/* part #2: call after irq install, but before gem init */ +int intel_display_driver_probe_nogem(struct drm_i915_private *i915) +{ + struct drm_device *dev = &i915->drm; + enum pipe pipe; + struct intel_crtc *crtc; + int ret; + + if (!HAS_DISPLAY(i915)) + return 0; + + intel_wm_init(i915); + + intel_panel_sanitize_ssc(i915); + + intel_pps_setup(i915); + + intel_gmbus_setup(i915); + + drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n", + INTEL_NUM_PIPES(i915), + INTEL_NUM_PIPES(i915) > 1 ? 
"s" : ""); + + for_each_pipe(i915, pipe) { + ret = intel_crtc_init(i915, pipe); + if (ret) { + intel_mode_config_cleanup(i915); + return ret; + } + } + + intel_plane_possible_crtcs_init(i915); + intel_shared_dpll_init(i915); + intel_fdi_pll_freq_update(i915); + + intel_update_czclk(i915); + intel_display_driver_init_hw(i915); + intel_dpll_update_ref_clks(i915); + + intel_hdcp_component_init(i915); + + if (i915->display.cdclk.max_cdclk_freq == 0) + intel_update_max_cdclk(i915); + + intel_hti_init(i915); + + /* Just disable it once at startup */ + intel_vga_disable(i915); + intel_setup_outputs(i915); + + drm_modeset_lock_all(dev); + intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx); + intel_acpi_assign_connector_fwnodes(i915); + drm_modeset_unlock_all(dev); + + for_each_intel_crtc(dev, crtc) { + if (!to_intel_crtc_state(crtc->base.state)->uapi.active) + continue; + intel_crtc_initial_plane_config(crtc); + } + + /* + * Make sure hardware watermarks really match the state we read out. + * Note that we need to do this after reconstructing the BIOS fb's + * since the watermark calculation done here will use pstate->fb. + */ + if (!HAS_GMCH(i915)) + ilk_wm_sanitize(i915); + + return 0; +} + +/* part #3: call after gem init */ +int intel_display_driver_probe(struct drm_i915_private *i915) +{ + int ret; + + if (!HAS_DISPLAY(i915)) + return 0; + + /* + * Force all active planes to recompute their states. So that on + * mode_setcrtc after probe, all the intel_plane_state variables + * are already calculated and there is no assert_plane warnings + * during bootup. + */ + ret = intel_initial_commit(&i915->drm); + if (ret) + drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); + + intel_overlay_setup(i915); + + ret = intel_fbdev_init(&i915->drm); + if (ret) + return ret; + + /* Only enable hotplug handling once the fbdev is fully set up. */ + intel_hpd_init(i915); + intel_hpd_poll_disable(i915); + + skl_watermark_ipc_init(i915); + + return 0; +} + +void intel_display_driver_register(struct drm_i915_private *i915) +{ + if (!HAS_DISPLAY(i915)) + return; + + /* Must be done after probing outputs */ + intel_opregion_register(i915); + intel_acpi_video_register(i915); + + intel_audio_init(i915); + + intel_display_debugfs_register(i915); + + /* + * Some ports require correctly set-up hpd registers for + * detection to work properly (leading to ghost connected + * connector status), e.g. VGA on gm45. Hence we can only set + * up the initial fbdev config after hpd irqs are fully + * enabled. We do it last so that the async config cannot run + * before the connectors are registered. + */ + intel_fbdev_initial_config_async(i915); + + /* + * We need to coordinate the hotplugs with the asynchronous + * fbdev configuration, for which we use the + * fbdev->async_cookie. + */ + drm_kms_helper_poll_init(&i915->drm); +} + +/* part #1: call before irq uninstall */ +void intel_display_driver_remove(struct drm_i915_private *i915) +{ + if (!HAS_DISPLAY(i915)) + return; + + flush_workqueue(i915->display.wq.flip); + flush_workqueue(i915->display.wq.modeset); + + flush_work(&i915->display.atomic_helper.free_work); + drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list)); + + /* + * MST topology needs to be suspended so we don't have any calls to + * fbdev after it's finalized. 
MST will be destroyed later as part of + * drm_mode_config_cleanup() + */ + intel_dp_mst_suspend(i915); +} + +/* part #2: call after irq uninstall */ +void intel_display_driver_remove_noirq(struct drm_i915_private *i915) +{ + if (!HAS_DISPLAY(i915)) + return; + + /* + * Due to the hpd irq storm handling the hotplug work can re-arm the + * poll handlers. Hence disable polling after hpd handling is shut down. + */ + intel_hpd_poll_fini(i915); + + /* poll work can call into fbdev, hence clean that up afterwards */ + intel_fbdev_fini(i915); + + intel_unregister_dsm_handler(); + + /* flush any delayed tasks or pending work */ + flush_scheduled_work(); + + intel_hdcp_component_fini(i915); + + intel_mode_config_cleanup(i915); + + intel_overlay_cleanup(i915); + + intel_gmbus_teardown(i915); + + destroy_workqueue(i915->display.wq.flip); + destroy_workqueue(i915->display.wq.modeset); + + intel_fbc_cleanup(i915); +} + +/* part #3: call after gem init */ +void intel_display_driver_remove_nogem(struct drm_i915_private *i915) +{ + intel_dmc_fini(i915); + + intel_power_domains_driver_remove(i915); + + intel_vga_unregister(i915); + + intel_bios_driver_remove(i915); +} + +void intel_display_driver_unregister(struct drm_i915_private *i915) +{ + if (!HAS_DISPLAY(i915)) + return; + + intel_fbdev_unregister(i915); + intel_audio_deinit(i915); + + /* + * After flushing the fbdev (incl. a late async config which + * will have delayed queuing of a hotplug event), then flush + * the hotplug events. + */ + drm_kms_helper_poll_fini(&i915->drm); + drm_atomic_helper_shutdown(&i915->drm); + + acpi_video_unregister(); + intel_opregion_unregister(i915); +} + +/* + * turn all crtc's off, but do not adjust state + * This has to be paired with a call to intel_modeset_setup_hw_state. + */ +int intel_display_driver_suspend(struct drm_i915_private *i915) +{ + struct drm_atomic_state *state; + int ret; + + if (!HAS_DISPLAY(i915)) + return 0; + + state = drm_atomic_helper_suspend(&i915->drm); + ret = PTR_ERR_OR_ZERO(state); + if (ret) + drm_err(&i915->drm, "Suspending crtc's failed with %i\n", + ret); + else + i915->display.restore.modeset_state = state; + return ret; +} + +int +__intel_display_driver_resume(struct drm_i915_private *i915, + struct drm_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + int ret, i; + + intel_modeset_setup_hw_state(i915, ctx); + intel_vga_redisable(i915); + + if (!state) + return 0; + + /* + * We've duplicated the state, pointers to the old state are invalid. + * + * Don't attempt to use the old state until we commit the duplicated state. + */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + /* + * Force recalculation even if we restore + * current state. With fast modeset this may not result + * in a modeset when the state is compatible. 
+ */ + crtc_state->mode_changed = true; + } + + /* ignore any reset values/BIOS leftovers in the WM registers */ + if (!HAS_GMCH(i915)) + to_intel_atomic_state(state)->skip_intermediate_wm = true; + + ret = drm_atomic_helper_commit_duplicated_state(state, ctx); + + drm_WARN_ON(&i915->drm, ret == -EDEADLK); + + return ret; +} + +void intel_display_driver_resume(struct drm_i915_private *i915) +{ + struct drm_atomic_state *state = i915->display.restore.modeset_state; + struct drm_modeset_acquire_ctx ctx; + int ret; + + if (!HAS_DISPLAY(i915)) + return; + + i915->display.restore.modeset_state = NULL; + if (state) + state->acquire_ctx = &ctx; + + drm_modeset_acquire_init(&ctx, 0); + + while (1) { + ret = drm_modeset_lock_all_ctx(&i915->drm, &ctx); + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(&ctx); + } + + if (!ret) + ret = __intel_display_driver_resume(i915, state, &ctx); + + skl_watermark_ipc_update(i915); + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + if (ret) + drm_err(&i915->drm, + "Restoring old state failed with %i\n", ret); + if (state) + drm_atomic_state_put(state); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h new file mode 100644 index 000000000000..c276a58ee329 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_driver.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022-2023 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_DRIVER_H__ +#define __INTEL_DISPLAY_DRIVER_H__ + +#include <linux/types.h> + +struct drm_atomic_state; +struct drm_i915_private; +struct drm_modeset_acquire_ctx; +struct pci_dev; + +bool intel_display_driver_probe_defer(struct pci_dev *pdev); +void intel_display_driver_init_hw(struct drm_i915_private *i915); +void intel_display_driver_early_probe(struct drm_i915_private *i915); +int intel_display_driver_probe_noirq(struct drm_i915_private *i915); +int intel_display_driver_probe_nogem(struct drm_i915_private *i915); +int intel_display_driver_probe(struct drm_i915_private *i915); +void intel_display_driver_register(struct drm_i915_private *i915); +void intel_display_driver_remove(struct drm_i915_private *i915); +void intel_display_driver_remove_noirq(struct drm_i915_private *i915); +void intel_display_driver_remove_nogem(struct drm_i915_private *i915); +void intel_display_driver_unregister(struct drm_i915_private *i915); +int intel_display_driver_suspend(struct drm_i915_private *i915); +void intel_display_driver_resume(struct drm_i915_private *i915); + +/* interface for intel_display_reset.c */ +int __intel_display_driver_resume(struct drm_i915_private *i915, + struct drm_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx); + +#endif /* __INTEL_DISPLAY_DRIVER_H__ */ + diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c new file mode 100644 index 000000000000..3b2a287d2041 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -0,0 +1,1668 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "gt/intel_rps.h" +#include "i915_drv.h" +#include "i915_irq.h" +#include "i915_reg.h" +#include "icl_dsi_regs.h" +#include "intel_crtc.h" +#include "intel_de.h" +#include "intel_display_irq.h" +#include "intel_display_trace.h" +#include "intel_display_types.h" +#include "intel_dp_aux.h" +#include "intel_fdi_regs.h" +#include "intel_fifo_underrun.h" +#include "intel_gmbus.h" +#include "intel_hotplug_irq.h" 
+#include "intel_psr.h" +#include "intel_psr_regs.h" + +static void +intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + + drm_crtc_handle_vblank(&crtc->base); +} + +/** + * ilk_update_display_irq - update DEIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +void ilk_update_display_irq(struct drm_i915_private *dev_priv, + u32 interrupt_mask, u32 enabled_irq_mask) +{ + u32 new_val; + + lockdep_assert_held(&dev_priv->irq_lock); + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + + new_val = dev_priv->irq_mask; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != dev_priv->irq_mask && + !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { + dev_priv->irq_mask = new_val; + intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); + intel_uncore_posting_read(&dev_priv->uncore, DEIMR); + } +} + +void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) +{ + ilk_update_display_irq(i915, bits, bits); +} + +void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) +{ + ilk_update_display_irq(i915, bits, 0); +} + +/** + * bdw_update_port_irq - update DE port interrupt + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +void bdw_update_port_irq(struct drm_i915_private *dev_priv, + u32 interrupt_mask, u32 enabled_irq_mask) +{ + u32 new_val; + u32 old_val; + + lockdep_assert_held(&dev_priv->irq_lock); + + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + + if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + return; + + old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); + + new_val = old_val; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != old_val) { + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); + intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); + } +} + +/** + * bdw_update_pipe_irq - update DE pipe interrupt + * @dev_priv: driver private + * @pipe: pipe whose interrupt to update + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 interrupt_mask, + u32 enabled_irq_mask) +{ + u32 new_val; + + lockdep_assert_held(&dev_priv->irq_lock); + + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + + if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + return; + + new_val = dev_priv->de_irq_mask[pipe]; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != dev_priv->de_irq_mask[pipe]) { + dev_priv->de_irq_mask[pipe] = new_val; + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); + } +} + +void bdw_enable_pipe_irq(struct drm_i915_private *i915, + enum pipe pipe, u32 bits) +{ + bdw_update_pipe_irq(i915, pipe, bits, bits); +} + +void bdw_disable_pipe_irq(struct drm_i915_private *i915, + enum pipe pipe, u32 bits) +{ + bdw_update_pipe_irq(i915, pipe, bits, 0); +} + +/** + * ibx_display_interrupt_update - update SDEIMR + * @dev_priv: driver private + * @interrupt_mask: mask 
of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + u32 interrupt_mask, + u32 enabled_irq_mask) +{ + u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); + + sdeimr &= ~interrupt_mask; + sdeimr |= (~enabled_irq_mask & interrupt_mask); + + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + + lockdep_assert_held(&dev_priv->irq_lock); + + if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + return; + + intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); + intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); +} + +void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) +{ + ibx_display_interrupt_update(i915, bits, bits); +} + +void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) +{ + ibx_display_interrupt_update(i915, bits, 0); +} + +u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; + u32 enable_mask = status_mask << 16; + + lockdep_assert_held(&dev_priv->irq_lock); + + if (DISPLAY_VER(dev_priv) < 5) + goto out; + + /* + * On pipe A we don't support the PSR interrupt yet, + * on pipe B and C the same bit MBZ. + */ + if (drm_WARN_ON_ONCE(&dev_priv->drm, + status_mask & PIPE_A_PSR_STATUS_VLV)) + return 0; + /* + * On pipe B and C we don't support the PSR interrupt yet, on pipe + * A the same bit is for perf counters which we don't use either. + */ + if (drm_WARN_ON_ONCE(&dev_priv->drm, + status_mask & PIPE_B_PSR_STATUS_VLV)) + return 0; + + enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | + SPRITE0_FLIP_DONE_INT_EN_VLV | + SPRITE1_FLIP_DONE_INT_EN_VLV); + if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) + enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; + if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) + enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; + +out: + drm_WARN_ONCE(&dev_priv->drm, + enable_mask & ~PIPESTAT_INT_ENABLE_MASK || + status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", + pipe_name(pipe), enable_mask, status_mask); + + return enable_mask; +} + +void i915_enable_pipestat(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 status_mask) +{ + i915_reg_t reg = PIPESTAT(pipe); + u32 enable_mask; + + drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: status_mask=0x%x\n", + pipe_name(pipe), status_mask); + + lockdep_assert_held(&dev_priv->irq_lock); + drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); + + if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) + return; + + dev_priv->pipestat_irq_mask[pipe] |= status_mask; + enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); + + intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); + intel_uncore_posting_read(&dev_priv->uncore, reg); +} + +void i915_disable_pipestat(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 status_mask) +{ + i915_reg_t reg = PIPESTAT(pipe); + u32 enable_mask; + + drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: status_mask=0x%x\n", + pipe_name(pipe), status_mask); + + lockdep_assert_held(&dev_priv->irq_lock); + drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); + + if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) + return; + + dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; + enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); + + 
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); + intel_uncore_posting_read(&dev_priv->uncore, reg); +} + +static bool i915_has_asle(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->display.opregion.asle) + return false; + + return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); +} + +/** + * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion + * @dev_priv: i915 device private + */ +void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) +{ + if (!i915_has_asle(dev_priv)) + return; + + spin_lock_irq(&dev_priv->irq_lock); + + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); + if (DISPLAY_VER(dev_priv) >= 4) + i915_enable_pipestat(dev_priv, PIPE_A, + PIPE_LEGACY_BLC_EVENT_STATUS); + + spin_unlock_irq(&dev_priv->irq_lock); +} + +#if defined(CONFIG_DEBUG_FS) +static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, + u32 crc0, u32 crc1, + u32 crc2, u32 crc3, + u32 crc4) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; + u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; + + trace_intel_pipe_crc(crtc, crcs); + + spin_lock(&pipe_crc->lock); + /* + * For some not yet identified reason, the first CRC is + * bonkers. So let's just wait for the next vblank and read + * out the buggy result. + * + * On GEN8+ sometimes the second CRC is bonkers as well, so + * don't trust that one either. + */ + if (pipe_crc->skipped <= 0 || + (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { + pipe_crc->skipped++; + spin_unlock(&pipe_crc->lock); + return; + } + spin_unlock(&pipe_crc->lock); + + drm_crtc_add_crc_entry(&crtc->base, true, + drm_crtc_accurate_vblank_count(&crtc->base), + crcs); +} +#else +static inline void +display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, + u32 crc0, u32 crc1, + u32 crc2, u32 crc3, + u32 crc4) {} +#endif + +static void flip_done_handler(struct drm_i915_private *i915, + enum pipe pipe) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + struct drm_crtc_state *crtc_state = crtc->base.state; + struct drm_pending_vblank_event *e = crtc_state->event; + struct drm_device *dev = &i915->drm; + unsigned long irqflags; + + spin_lock_irqsave(&dev->event_lock, irqflags); + + crtc_state->event = NULL; + + drm_crtc_send_vblank_event(&crtc->base, e); + + spin_unlock_irqrestore(&dev->event_lock, irqflags); +} + +static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + display_pipe_crc_irq_handler(dev_priv, pipe, + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), + 0, 0, 0, 0); +} + +static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + display_pipe_crc_irq_handler(dev_priv, pipe, + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); +} + +static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 res1, res2; + + if (DISPLAY_VER(dev_priv) >= 3) + res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)); + else + res1 = 0; + + if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) + res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)); + 
else + res2 = 0; + + display_pipe_crc_irq_handler(dev_priv, pipe, + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)), + res1, res2); +} + +void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe), + PIPESTAT_INT_STATUS_MASK | + PIPE_FIFO_UNDERRUN_STATUS); + + dev_priv->pipestat_irq_mask[pipe] = 0; + } +} + +void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) +{ + enum pipe pipe; + + spin_lock(&dev_priv->irq_lock); + + if (!dev_priv->display_irqs_enabled) { + spin_unlock(&dev_priv->irq_lock); + return; + } + + for_each_pipe(dev_priv, pipe) { + i915_reg_t reg; + u32 status_mask, enable_mask, iir_bit = 0; + + /* + * PIPESTAT bits get signalled even when the interrupt is + * disabled with the mask bits, and some of the status bits do + * not generate interrupts at all (like the underrun bit). Hence + * we need to be careful that we only handle what we want to + * handle. + */ + + /* fifo underruns are filterered in the underrun handler. */ + status_mask = PIPE_FIFO_UNDERRUN_STATUS; + + switch (pipe) { + default: + case PIPE_A: + iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; + break; + case PIPE_B: + iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + break; + case PIPE_C: + iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; + break; + } + if (iir & iir_bit) + status_mask |= dev_priv->pipestat_irq_mask[pipe]; + + if (!status_mask) + continue; + + reg = PIPESTAT(pipe); + pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask; + enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); + + /* + * Clear the PIPE*STAT regs before the IIR + * + * Toggle the enable bits to make sure we get an + * edge in the ISR pipe event bit if we don't clear + * all the enabled status bits. Otherwise the edge + * triggered IIR on i965/g4x wouldn't notice that + * an interrupt is still pending. 
+ */ + if (pipe_stats[pipe]) { + intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]); + intel_uncore_write(&dev_priv->uncore, reg, enable_mask); + } + } + spin_unlock(&dev_priv->irq_lock); +} + +void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, + u16 iir, u32 pipe_stats[I915_MAX_PIPES]) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + intel_handle_vblank(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } +} + +void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) +{ + bool blc_event = false; + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + intel_handle_vblank(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev_priv); +} + +void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) +{ + bool blc_event = false; + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) + intel_handle_vblank(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev_priv); + + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) + intel_gmbus_irq_handler(dev_priv); +} + +void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, + u32 pipe_stats[I915_MAX_PIPES]) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) + intel_handle_vblank(dev_priv, pipe); + + if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) + flip_done_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } + + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) + intel_gmbus_irq_handler(dev_priv); +} + +static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +{ + enum pipe pipe; + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; + + ibx_hpd_irq_handler(dev_priv, hotplug_trigger); + + if (pch_iir & SDE_AUDIO_POWER_MASK) { + int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> + SDE_AUDIO_POWER_SHIFT); + drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", + port_name(port)); + } + + if (pch_iir & SDE_AUX_MASK) + intel_dp_aux_irq_handler(dev_priv); + + if (pch_iir & SDE_GMBUS) + intel_gmbus_irq_handler(dev_priv); + + if (pch_iir & SDE_AUDIO_HDCP_MASK) + drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); + + if (pch_iir & SDE_AUDIO_TRANS_MASK) + drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); + + 
if (pch_iir & SDE_POISON) + drm_err(&dev_priv->drm, "PCH poison interrupt\n"); + + if (pch_iir & SDE_FDI_MASK) { + for_each_pipe(dev_priv, pipe) + drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); + } + + if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) + drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); + + if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) + drm_dbg(&dev_priv->drm, + "PCH transcoder CRC error interrupt\n"); + + if (pch_iir & SDE_TRANSA_FIFO_UNDER) + intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); + + if (pch_iir & SDE_TRANSB_FIFO_UNDER) + intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); +} + +static void ivb_err_int_handler(struct drm_i915_private *dev_priv) +{ + u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); + enum pipe pipe; + + if (err_int & ERR_INT_POISON) + drm_err(&dev_priv->drm, "Poison interrupt\n"); + + for_each_pipe(dev_priv, pipe) { + if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + + if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { + if (IS_IVYBRIDGE(dev_priv)) + ivb_pipe_crc_irq_handler(dev_priv, pipe); + else + hsw_pipe_crc_irq_handler(dev_priv, pipe); + } + } + + intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); +} + +static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) +{ + u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); + enum pipe pipe; + + if (serr_int & SERR_INT_POISON) + drm_err(&dev_priv->drm, "PCH poison interrupt\n"); + + for_each_pipe(dev_priv, pipe) + if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) + intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); + + intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); +} + +static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +{ + enum pipe pipe; + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; + + ibx_hpd_irq_handler(dev_priv, hotplug_trigger); + + if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { + int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> + SDE_AUDIO_POWER_SHIFT_CPT); + drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", + port_name(port)); + } + + if (pch_iir & SDE_AUX_MASK_CPT) + intel_dp_aux_irq_handler(dev_priv); + + if (pch_iir & SDE_GMBUS_CPT) + intel_gmbus_irq_handler(dev_priv); + + if (pch_iir & SDE_AUDIO_CP_REQ_CPT) + drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); + + if (pch_iir & SDE_AUDIO_CP_CHG_CPT) + drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); + + if (pch_iir & SDE_FDI_MASK_CPT) { + for_each_pipe(dev_priv, pipe) + drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); + } + + if (pch_iir & SDE_ERROR_CPT) + cpt_serr_int_handler(dev_priv); +} + +void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) +{ + enum pipe pipe; + u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; + + if (hotplug_trigger) + ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + + if (de_iir & DE_AUX_CHANNEL_A) + intel_dp_aux_irq_handler(dev_priv); + + if (de_iir & DE_GSE) + intel_opregion_asle_intr(dev_priv); + + if (de_iir & DE_POISON) + drm_err(&dev_priv->drm, "Poison interrupt\n"); + + for_each_pipe(dev_priv, pipe) { + if (de_iir & DE_PIPE_VBLANK(pipe)) + intel_handle_vblank(dev_priv, pipe); + + if (de_iir & DE_PLANE_FLIP_DONE(pipe)) + flip_done_handler(dev_priv, pipe); + + if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) + 
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + + if (de_iir & DE_PIPE_CRC_DONE(pipe)) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + } + + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT) { + u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); + + if (HAS_PCH_CPT(dev_priv)) + cpt_irq_handler(dev_priv, pch_iir); + else + ibx_irq_handler(dev_priv, pch_iir); + + /* should clear PCH hotplug event before clear CPU irq */ + intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); + } + + if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) + gen5_rps_irq_handler(&to_gt(dev_priv)->rps); +} + +void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) +{ + enum pipe pipe; + u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; + + if (hotplug_trigger) + ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + + if (de_iir & DE_ERR_INT_IVB) + ivb_err_int_handler(dev_priv); + + if (de_iir & DE_AUX_CHANNEL_A_IVB) + intel_dp_aux_irq_handler(dev_priv); + + if (de_iir & DE_GSE_IVB) + intel_opregion_asle_intr(dev_priv); + + for_each_pipe(dev_priv, pipe) { + if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) + intel_handle_vblank(dev_priv, pipe); + + if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) + flip_done_handler(dev_priv, pipe); + } + + /* check event from PCH */ + if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { + u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); + + cpt_irq_handler(dev_priv, pch_iir); + + /* clear PCH hotplug event before clear CPU irq */ + intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); + } +} + +static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) +{ + u32 mask; + + if (DISPLAY_VER(dev_priv) >= 14) + return TGL_DE_PORT_AUX_DDIA | + TGL_DE_PORT_AUX_DDIB; + else if (DISPLAY_VER(dev_priv) >= 13) + return TGL_DE_PORT_AUX_DDIA | + TGL_DE_PORT_AUX_DDIB | + TGL_DE_PORT_AUX_DDIC | + XELPD_DE_PORT_AUX_DDID | + XELPD_DE_PORT_AUX_DDIE | + TGL_DE_PORT_AUX_USBC1 | + TGL_DE_PORT_AUX_USBC2 | + TGL_DE_PORT_AUX_USBC3 | + TGL_DE_PORT_AUX_USBC4; + else if (DISPLAY_VER(dev_priv) >= 12) + return TGL_DE_PORT_AUX_DDIA | + TGL_DE_PORT_AUX_DDIB | + TGL_DE_PORT_AUX_DDIC | + TGL_DE_PORT_AUX_USBC1 | + TGL_DE_PORT_AUX_USBC2 | + TGL_DE_PORT_AUX_USBC3 | + TGL_DE_PORT_AUX_USBC4 | + TGL_DE_PORT_AUX_USBC5 | + TGL_DE_PORT_AUX_USBC6; + + mask = GEN8_AUX_CHANNEL_A; + if (DISPLAY_VER(dev_priv) >= 9) + mask |= GEN9_AUX_CHANNEL_B | + GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + + if (DISPLAY_VER(dev_priv) == 11) { + mask |= ICL_AUX_CHANNEL_F; + mask |= ICL_AUX_CHANNEL_E; + } + + return mask; +} + +static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) +{ + if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) + return RKL_DE_PIPE_IRQ_FAULT_ERRORS; + else if (DISPLAY_VER(dev_priv) >= 11) + return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; + else if (DISPLAY_VER(dev_priv) >= 9) + return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; +} + +static void +gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) +{ + bool found = false; + + if (iir & GEN8_DE_MISC_GSE) { + intel_opregion_asle_intr(dev_priv); + found = true; + } + + if (iir & GEN8_DE_EDP_PSR) { + struct intel_encoder *encoder; + u32 psr_iir; + i915_reg_t iir_reg; + + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (DISPLAY_VER(dev_priv) >= 12) + iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder); + else + iir_reg = EDP_PSR_IIR; + + psr_iir = 
intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0); + + if (psr_iir) + found = true; + + intel_psr_irq_handler(intel_dp, psr_iir); + + /* prior GEN12 only have one EDP PSR */ + if (DISPLAY_VER(dev_priv) < 12) + break; + } + } + + if (!found) + drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); +} + +static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, + u32 te_trigger) +{ + enum pipe pipe = INVALID_PIPE; + enum transcoder dsi_trans; + enum port port; + u32 val, tmp; + + /* + * Incase of dual link, TE comes from DSI_1 + * this is to check if dual link is enabled + */ + val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); + val &= PORT_SYNC_MODE_ENABLE; + + /* + * if dual link is enabled, then read DSI_0 + * transcoder registers + */ + port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? + PORT_A : PORT_B; + dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1; + + /* Check if DSI configured in command mode */ + val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); + val = val & OP_MODE_MASK; + + if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { + drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n"); + return; + } + + /* Get PIPE for handling VBLANK event */ + val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); + switch (val & TRANS_DDI_EDP_INPUT_MASK) { + case TRANS_DDI_EDP_INPUT_A_ON: + pipe = PIPE_A; + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + pipe = PIPE_B; + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + pipe = PIPE_C; + break; + default: + drm_err(&dev_priv->drm, "Invalid PIPE\n"); + return; + } + + intel_handle_vblank(dev_priv, pipe); + + /* clear TE in dsi IIR */ + port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A; + tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); +} + +static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 9) + return GEN9_PIPE_PLANE1_FLIP_DONE; + else + return GEN8_PIPE_PRIMARY_FLIP_DONE; +} + +u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) +{ + u32 mask = GEN8_PIPE_FIFO_UNDERRUN; + + if (DISPLAY_VER(dev_priv) >= 13) + mask |= XELPD_PIPE_SOFT_UNDERRUN | + XELPD_PIPE_HARD_UNDERRUN; + + return mask; +} + +static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir) +{ + u32 pica_ier = 0; + + *pica_iir = 0; + *pch_iir = intel_de_read(i915, SDEIIR); + if (!*pch_iir) + return; + + /** + * PICA IER must be disabled/re-enabled around clearing PICA IIR and + * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set + * their flags both in the PICA and SDE IIR. 
+ */ + if (*pch_iir & SDE_PICAINTERRUPT) { + drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP); + + pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0); + *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR); + intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir); + } + + intel_de_write(i915, SDEIIR, *pch_iir); + + if (pica_ier) + intel_de_write(i915, PICAINTERRUPT_IER, pica_ier); +} + +void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) +{ + u32 iir; + enum pipe pipe; + + drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); + + if (master_ctl & GEN8_DE_MISC_IRQ) { + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); + if (iir) { + intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); + gen8_de_misc_irq_handler(dev_priv, iir); + } else { + drm_err_ratelimited(&dev_priv->drm, + "The master control interrupt lied (DE MISC)!\n"); + } + } + + if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { + iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); + if (iir) { + intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); + gen11_hpd_irq_handler(dev_priv, iir); + } else { + drm_err_ratelimited(&dev_priv->drm, + "The master control interrupt lied, (DE HPD)!\n"); + } + } + + if (master_ctl & GEN8_DE_PORT_IRQ) { + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); + if (iir) { + bool found = false; + + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); + + if (iir & gen8_de_port_aux_mask(dev_priv)) { + intel_dp_aux_irq_handler(dev_priv); + found = true; + } + + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; + + if (hotplug_trigger) { + bxt_hpd_irq_handler(dev_priv, hotplug_trigger); + found = true; + } + } else if (IS_BROADWELL(dev_priv)) { + u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; + + if (hotplug_trigger) { + ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + found = true; + } + } + + if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && + (iir & BXT_DE_PORT_GMBUS)) { + intel_gmbus_irq_handler(dev_priv); + found = true; + } + + if (DISPLAY_VER(dev_priv) >= 11) { + u32 te_trigger = iir & (DSI0_TE | DSI1_TE); + + if (te_trigger) { + gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); + found = true; + } + } + + if (!found) + drm_err_ratelimited(&dev_priv->drm, + "Unexpected DE Port interrupt\n"); + } else { + drm_err_ratelimited(&dev_priv->drm, + "The master control interrupt lied (DE PORT)!\n"); + } + } + + for_each_pipe(dev_priv, pipe) { + u32 fault_errors; + + if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) + continue; + + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); + if (!iir) { + drm_err_ratelimited(&dev_priv->drm, + "The master control interrupt lied (DE PIPE)!\n"); + continue; + } + + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); + + if (iir & GEN8_PIPE_VBLANK) + intel_handle_vblank(dev_priv, pipe); + + if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) + flip_done_handler(dev_priv, pipe); + + if (iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev_priv, pipe); + + if (iir & gen8_de_pipe_underrun_mask(dev_priv)) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + + fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); + if (fault_errors) + drm_err_ratelimited(&dev_priv->drm, + "Fault errors on pipe %c: 0x%08x\n", + pipe_name(pipe), + fault_errors); + } + + if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && + master_ctl & GEN8_DE_PCH_IRQ) { + u32 
pica_iir; + + /* + * FIXME(BDW): Assume for now that the new interrupt handling + * scheme also closed the SDE interrupt handling race we've seen + * on older pch-split platforms. But this needs testing. + */ + gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir); + if (iir) { + if (pica_iir) + xelpdp_pica_irq_handler(dev_priv, pica_iir); + + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + icp_irq_handler(dev_priv, iir); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) + spt_irq_handler(dev_priv, iir); + else + cpt_irq_handler(dev_priv, iir); + } else { + /* + * Like on previous PCH there seems to be something + * fishy going on with forwarding PCH interrupts. + */ + drm_dbg(&dev_priv->drm, + "The master control interrupt lied (SDE)!\n"); + } + } +} + +u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) +{ + void __iomem * const regs = i915->uncore.regs; + u32 iir; + + if (!(master_ctl & GEN11_GU_MISC_IRQ)) + return 0; + + iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); + if (likely(iir)) + raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); + + return iir; +} + +void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) +{ + if (iir & GEN11_GU_MISC_GSE) + intel_opregion_asle_intr(i915); +} + +void gen11_display_irq_handler(struct drm_i915_private *i915) +{ + void __iomem * const regs = i915->uncore.regs; + const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); + + disable_rpm_wakeref_asserts(&i915->runtime_pm); + /* + * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ + * for the display related bits. + */ + raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); + gen8_de_irq_handler(i915, disp_ctl); + raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, + GEN11_DISPLAY_IRQ_ENABLE); + + enable_rpm_wakeref_asserts(&i915->runtime_pm); +} + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +int i8xx_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return 0; +} + +int i915gm_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + + /* + * Vblank interrupts fail to wake the device up from C2+. + * Disabling render clock gating during C-states avoids + * the problem. There is a small power cost so we do this + * only when vblank interrupts are actually enabled. + */ + if (dev_priv->vblank_enabled++ == 0) + intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); + + return i8xx_enable_vblank(crtc); +} + +int i965_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return 0; +} + +int ilk_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ilk_enable_display_irq(dev_priv, bit); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + /* Even though there is no DMC, frame counter can get stuck when + * PSR is active as no frames are generated. + */ + if (HAS_PSR(dev_priv)) + drm_crtc_vblank_restore(crtc); + + return 0; +} + +static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, + bool enable) +{ + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + enum port port; + + if (!(intel_crtc->mode_flags & + (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) + return false; + + /* for dual link cases we consider TE from slave */ + if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) + port = PORT_B; + else + port = PORT_A; + + intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, + enable ? 0 : DSI_TE_EVENT); + + intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); + + return true; +} + +int bdw_enable_vblank(struct drm_crtc *_crtc) +{ + struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + unsigned long irqflags; + + if (gen11_dsi_configure_te(crtc, true)) + return 0; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + /* Even if there is no DMC, frame counter can get stuck when + * PSR is active as no frames are generated, so check only for PSR. + */ + if (HAS_PSR(dev_priv)) + drm_crtc_vblank_restore(&crtc->base); + + return 0; +} + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +void i8xx_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +void i915gm_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + + i8xx_disable_vblank(crtc); + + if (--dev_priv->vblank_enabled == 0) + intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); +} + +void i965_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +void ilk_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ilk_disable_display_irq(dev_priv, bit); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +void bdw_disable_vblank(struct drm_crtc *_crtc) +{ + struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + unsigned long irqflags; + + if (gen11_dsi_configure_te(crtc, false)) + return; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +void vlv_display_irq_reset(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + + if (IS_CHERRYVIEW(dev_priv)) + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); + else + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); + + i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); + intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0); + + i9xx_pipestat_irq_reset(dev_priv); + + GEN3_IRQ_RESET(uncore, VLV_); + dev_priv->irq_mask = ~0u; +} + +void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + + u32 pipestat_mask; + u32 enable_mask; + enum pipe pipe; + + pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; + + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + for_each_pipe(dev_priv, pipe) + i915_enable_pipestat(dev_priv, pipe, pipestat_mask); + + enable_mask = I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_LPE_PIPE_A_INTERRUPT | + I915_LPE_PIPE_B_INTERRUPT; + + if (IS_CHERRYVIEW(dev_priv)) + enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | + I915_LPE_PIPE_C_INTERRUPT; + + drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); + + dev_priv->irq_mask = ~enable_mask; + + GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); +} + +void gen8_display_irq_reset(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + enum pipe pipe; + + if (!HAS_DISPLAY(dev_priv)) + return; + + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); + + for_each_pipe(dev_priv, pipe) + if (intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + + GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); + GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); +} + +void gen11_display_irq_reset(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + enum pipe pipe; + u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D); + + if (!HAS_DISPLAY(dev_priv)) + return; + + intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); + + if (DISPLAY_VER(dev_priv) >= 12) { + enum transcoder trans; + + for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { + enum intel_display_power_domain domain; + + domain = POWER_DOMAIN_TRANSCODER(trans); + if (!intel_display_power_is_enabled(dev_priv, domain)) + continue; + + intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); + intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); + } + } else { + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); + } + + for_each_pipe(dev_priv, pipe) + if (intel_display_power_is_enabled(dev_priv, + 
POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + + GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); + GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); + + if (DISPLAY_VER(dev_priv) >= 14) + GEN3_IRQ_RESET(uncore, PICAINTERRUPT_); + else + GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); + + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + GEN3_IRQ_RESET(uncore, SDE); +} + +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, + u8 pipe_mask) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + u32 extra_ier = GEN8_PIPE_VBLANK | + gen8_de_pipe_underrun_mask(dev_priv) | + gen8_de_pipe_flip_done_mask(dev_priv); + enum pipe pipe; + + spin_lock_irq(&dev_priv->irq_lock); + + if (!intel_irqs_enabled(dev_priv)) { + spin_unlock_irq(&dev_priv->irq_lock); + return; + } + + for_each_pipe_masked(dev_priv, pipe, pipe_mask) + GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, + dev_priv->de_irq_mask[pipe], + ~dev_priv->de_irq_mask[pipe] | extra_ier); + + spin_unlock_irq(&dev_priv->irq_lock); +} + +void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, + u8 pipe_mask) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + enum pipe pipe; + + spin_lock_irq(&dev_priv->irq_lock); + + if (!intel_irqs_enabled(dev_priv)) { + spin_unlock_irq(&dev_priv->irq_lock); + return; + } + + for_each_pipe_masked(dev_priv, pipe, pipe_mask) + GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + + spin_unlock_irq(&dev_priv->irq_lock); + + /* make sure we're done processing display irqs */ + intel_synchronize_irq(dev_priv); +} + +/* + * SDEIER is also touched by the interrupt handler to work around missed PCH + * interrupts. Hence we can't update it after the interrupt handler is enabled - + * instead we unconditionally enable all PCH interrupt sources here, but then + * only unmask them as needed with SDEIMR. + * + * Note that we currently do this after installing the interrupt handler, + * but before we enable the master interrupt. That should be sufficient + * to avoid races with the irq handler, assuming we have MSI. Shared legacy + * interrupts could still race. 
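To make the scheme described in the comment above concrete, here is a minimal standalone sketch (not driver code; the struct and helper names are invented for illustration) of what GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff) below expresses: every PCH source is enabled once in the IER, and all later control happens purely through the IMR.

struct example_sde_regs {
	unsigned int imr;	/* interrupt mask: bit set = source masked */
	unsigned int ier;	/* interrupt enable: bit set = source enabled */
};

/* Postinstall: enable every source once, but mask all except 'wanted'. */
static void example_sde_postinstall(struct example_sde_regs *r, unsigned int wanted)
{
	r->ier = 0xffffffff;
	r->imr = ~wanted;
}

/* Runtime changes only touch IMR; IER is never rewritten afterwards. */
static void example_sde_unmask(struct example_sde_regs *r, unsigned int bits)
{
	r->imr &= ~bits;
}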
+ */ +void ibx_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + u32 mask; + + if (HAS_PCH_NOP(dev_priv)) + return; + + if (HAS_PCH_IBX(dev_priv)) + mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; + else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) + mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; + else + mask = SDE_GMBUS_CPT; + + GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); +} + +void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) +{ + lockdep_assert_held(&dev_priv->irq_lock); + + if (dev_priv->display_irqs_enabled) + return; + + dev_priv->display_irqs_enabled = true; + + if (intel_irqs_enabled(dev_priv)) { + vlv_display_irq_reset(dev_priv); + vlv_display_irq_postinstall(dev_priv); + } +} + +void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) +{ + lockdep_assert_held(&dev_priv->irq_lock); + + if (!dev_priv->display_irqs_enabled) + return; + + dev_priv->display_irqs_enabled = false; + + if (intel_irqs_enabled(dev_priv)) + vlv_display_irq_reset(dev_priv); +} + +void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + + u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | + GEN8_PIPE_CDCLK_CRC_DONE; + u32 de_pipe_enables; + u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); + u32 de_port_enables; + u32 de_misc_masked = GEN8_DE_EDP_PSR; + u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D); + enum pipe pipe; + + if (!HAS_DISPLAY(dev_priv)) + return; + + if (DISPLAY_VER(dev_priv) <= 10) + de_misc_masked |= GEN8_DE_MISC_GSE; + + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + de_port_masked |= BXT_DE_PORT_GMBUS; + + if (DISPLAY_VER(dev_priv) >= 11) { + enum port port; + + if (intel_bios_is_dsi_present(dev_priv, &port)) + de_port_masked |= DSI0_TE | DSI1_TE; + } + + de_pipe_enables = de_pipe_masked | + GEN8_PIPE_VBLANK | + gen8_de_pipe_underrun_mask(dev_priv) | + gen8_de_pipe_flip_done_mask(dev_priv); + + de_port_enables = de_port_masked; + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; + else if (IS_BROADWELL(dev_priv)) + de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; + + if (DISPLAY_VER(dev_priv) >= 12) { + enum transcoder trans; + + for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { + enum intel_display_power_domain domain; + + domain = POWER_DOMAIN_TRANSCODER(trans); + if (!intel_display_power_is_enabled(dev_priv, domain)) + continue; + + gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); + } + } else { + gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + } + + for_each_pipe(dev_priv, pipe) { + dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; + + if (intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, + dev_priv->de_irq_mask[pipe], + de_pipe_enables); + } + + GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); + GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); + + if (IS_DISPLAY_VER(dev_priv, 11, 13)) { + u32 de_hpd_masked = 0; + u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | + GEN11_DE_TBT_HOTPLUG_MASK; + + GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, + de_hpd_enables); + } +} + +void mtp_irq_postinstall(struct drm_i915_private *i915) +{ + struct intel_uncore *uncore = &i915->uncore; + u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT; + u32 de_hpd_mask = XELPDP_AUX_TC_MASK; + u32 de_hpd_enables = 
de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK | + XELPDP_TBT_HOTPLUG_MASK; + + GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask, + de_hpd_enables); + + GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff); +} + +void icp_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + u32 mask = SDE_GMBUS_ICP; + + GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); +} + +void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) +{ + if (!HAS_DISPLAY(dev_priv)) + return; + + gen8_de_irq_postinstall(dev_priv); + + intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, + GEN11_DISPLAY_IRQ_ENABLE); +} + diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h new file mode 100644 index 000000000000..874893f4f16d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_irq.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_IRQ_H__ +#define __INTEL_DISPLAY_IRQ_H__ + +#include <linux/types.h> + +#include "intel_display_limits.h" + +enum pipe; +struct drm_i915_private; +struct drm_crtc; + +void valleyview_enable_display_irqs(struct drm_i915_private *i915); +void valleyview_disable_display_irqs(struct drm_i915_private *i915); + +void ilk_update_display_irq(struct drm_i915_private *i915, + u32 interrupt_mask, u32 enabled_irq_mask); +void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits); +void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits); + +void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask); +void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); +void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); + +void ibx_display_interrupt_update(struct drm_i915_private *i915, + u32 interrupt_mask, u32 enabled_irq_mask); +void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits); +void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits); + +void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask); +void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask); +u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *i915); + +int i8xx_enable_vblank(struct drm_crtc *crtc); +int i915gm_enable_vblank(struct drm_crtc *crtc); +int i965_enable_vblank(struct drm_crtc *crtc); +int ilk_enable_vblank(struct drm_crtc *crtc); +int bdw_enable_vblank(struct drm_crtc *crtc); +void i8xx_disable_vblank(struct drm_crtc *crtc); +void i915gm_disable_vblank(struct drm_crtc *crtc); +void i965_disable_vblank(struct drm_crtc *crtc); +void ilk_disable_vblank(struct drm_crtc *crtc); +void bdw_disable_vblank(struct drm_crtc *crtc); + +void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir); +void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir); +void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl); +void gen11_display_irq_handler(struct drm_i915_private *i915); + +u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl); +void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir); + +void vlv_display_irq_reset(struct drm_i915_private *i915); +void gen8_display_irq_reset(struct drm_i915_private *i915); +void gen11_display_irq_reset(struct drm_i915_private *i915); + +void ibx_irq_postinstall(struct drm_i915_private *i915); +void 
vlv_display_irq_postinstall(struct drm_i915_private *i915); +void icp_irq_postinstall(struct drm_i915_private *i915); +void gen8_de_irq_postinstall(struct drm_i915_private *i915); +void mtp_irq_postinstall(struct drm_i915_private *i915); +void gen11_de_irq_postinstall(struct drm_i915_private *i915); + +u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe); +void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); +void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); +void i915_enable_asle_pipestat(struct drm_i915_private *i915); +void i9xx_pipestat_irq_reset(struct drm_i915_private *i915); + +void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); + +void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]); +void i8xx_pipestat_irq_handler(struct drm_i915_private *i915, u16 iir, u32 pipe_stats[I915_MAX_PIPES]); + +#endif /* __INTEL_DISPLAY_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 7c9f4288329e..2f4f00ae2f57 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "i915_reg.h" #include "intel_backlight_regs.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" @@ -1052,7 +1053,7 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; - u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask; + u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask; enum dbuf_slice slice; drm_WARN(&dev_priv->drm, req_slices & ~slice_mask, @@ -1112,7 +1113,7 @@ static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) static void icl_mbus_init(struct drm_i915_private *dev_priv) { - unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; + unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask; u32 mask, val, i; if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) @@ -1164,31 +1165,39 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) struct intel_crtc *crtc; for_each_intel_crtc(&dev_priv->drm, crtc) - I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", + I915_STATE_WARN(dev_priv, crtc->active, + "CRTC for pipe %c enabled\n", pipe_name(crtc->pipe)); - I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), + I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), "Display power well on\n"); - I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, + I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); - I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, + I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); - I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, + I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); - I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, + 
I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, "Panel power on\n"); - I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, + I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); if (IS_HASWELL(dev_priv)) - I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, "CPU PWM2 enabled\n"); - I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, + I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, "PCH PWM1 enabled\n"); - I915_STATE_WARN((intel_de_read(dev_priv, UTIL_PIN_CTL) & - (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == - (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), + I915_STATE_WARN(dev_priv, + (intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), "Utility pin enabled in PWM mode\n"); - I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, + I915_STATE_WARN(dev_priv, + intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); /* @@ -1197,7 +1206,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) * gen-specific and since we only disable LCPLL after we fully disable * the interrupts, the check below should be enough. */ - I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); + I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv), + "IRQs enabled\n"); } static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) @@ -1558,7 +1568,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) enum intel_dram_type type = dev_priv->dram_info.type; u8 num_channels = dev_priv->dram_info.num_channels; const struct buddy_page_mask *table; - unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask; + unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask; int config, i; /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ @@ -2021,7 +2031,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915) /** * intel_power_domains_suspend - suspend power domain state * @i915: i915 device instance - * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) + * @s2idle: specifies whether we go to idle, or deeper sleep * * This function prepares the hardware power domain state before entering * system suspend. @@ -2029,8 +2039,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915) * It must be called with power domains already disabled (after a call to * intel_power_domains_disable()) and paired with intel_power_domains_resume(). */ -void intel_power_domains_suspend(struct drm_i915_private *i915, - enum i915_drm_suspend_mode suspend_mode) +void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle) { struct i915_power_domains *power_domains = &i915->display.power.domains; intel_wakeref_t wakeref __maybe_unused = @@ -2045,8 +2054,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. 
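A hedged caller-side sketch of the new boolean interface: the real call sites in this series may derive the flag differently (for example from ACPI sleep state), so the helper below is only one plausible way to compute it.

#include <linux/types.h>
#include <linux/suspend.h>

/* Hypothetical helper: true when the system is entering suspend-to-idle. */
static bool example_suspend_is_s2idle(void)
{
	return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
}

/* The power domain suspend call would then read: */
/* intel_power_domains_suspend(i915, example_suspend_is_s2idle()); */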
*/ - if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && - suspend_mode == I915_DRM_SUSPEND_IDLE && + if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle && intel_dmc_has_payload(i915)) { intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 8e96be8e6330..be1a87bde0c9 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -171,8 +171,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv); void intel_power_domains_enable(struct drm_i915_private *dev_priv); void intel_power_domains_disable(struct drm_i915_private *dev_priv); -void intel_power_domains_suspend(struct drm_i915_private *dev_priv, - enum i915_drm_suspend_mode); +void intel_power_domains_suspend(struct drm_i915_private *dev_priv, bool s2idle); void intel_power_domains_resume(struct drm_i915_private *dev_priv); void intel_power_domains_sanitize_state(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 6645eb1911d8..1118ee9d224c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1251,22 +1251,11 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, POWER_DOMAIN_PIPE_PANEL_FITTER_A, POWER_DOMAIN_INIT); -#define XELPD_PW_2_POWER_DOMAINS \ - XELPD_PW_B_POWER_DOMAINS, \ - XELPD_PW_C_POWER_DOMAINS, \ - XELPD_PW_D_POWER_DOMAINS, \ - POWER_DOMAIN_PORT_DDI_LANES_C, \ - POWER_DOMAIN_PORT_DDI_LANES_D, \ - POWER_DOMAIN_PORT_DDI_LANES_E, \ +#define XELPD_DC_OFF_PORT_POWER_DOMAINS \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_PORT_DDI_LANES_TC3, \ POWER_DOMAIN_PORT_DDI_LANES_TC4, \ - POWER_DOMAIN_VGA, \ - POWER_DOMAIN_AUDIO_PLAYBACK, \ - POWER_DOMAIN_AUX_IO_C, \ - POWER_DOMAIN_AUX_IO_D, \ - POWER_DOMAIN_AUX_IO_E, \ POWER_DOMAIN_AUX_C, \ POWER_DOMAIN_AUX_D, \ POWER_DOMAIN_AUX_E, \ @@ -1279,6 +1268,20 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, POWER_DOMAIN_AUX_TBT3, \ POWER_DOMAIN_AUX_TBT4 +#define XELPD_PW_2_POWER_DOMAINS \ + XELPD_PW_B_POWER_DOMAINS, \ + XELPD_PW_C_POWER_DOMAINS, \ + XELPD_PW_D_POWER_DOMAINS, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_IO_C, \ + POWER_DOMAIN_AUX_IO_D, \ + POWER_DOMAIN_AUX_IO_E, \ + XELPD_DC_OFF_PORT_POWER_DOMAINS + I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2, XELPD_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); @@ -1301,7 +1304,9 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2, */ I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off, - XELPD_PW_2_POWER_DOMAINS, + XELPD_DC_OFF_PORT_POWER_DOMAINS, + XELPD_PW_C_POWER_DOMAINS, + XELPD_PW_D_POWER_DOMAINS, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_AUDIO_MMIO, POWER_DOMAIN_AUX_A, @@ -1310,14 +1315,18 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); -static const struct i915_power_well_desc xelpd_power_wells_main[] = { +static const struct i915_power_well_desc xelpd_power_wells_dc_off[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &xelpd_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, - }, { + } +}; + +static const struct 
i915_power_well_desc xelpd_power_wells_main[] = { + { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &xelpd_pwdoms_pw_2, .hsw.idx = ICL_PW_CTL_IDX_PW_2, @@ -1378,6 +1387,11 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = { I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D), I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E), + ), + .ops = &icl_aux_power_well_ops, + .fixed_enable_delay = true, + }, { + .instances = &I915_PW_INSTANCES( I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), @@ -1385,6 +1399,8 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = { ), .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, + /* WA_14017248603: adlp */ + .enable_timeout = 500, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), @@ -1400,6 +1416,34 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = { static const struct i915_power_well_desc_list xelpd_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off), + I915_PW_DESCRIPTORS(xelpd_power_wells_main), +}; + +I915_DECL_PW_DOMAINS(xehpd_pwdoms_dc_off, + XELPD_PW_2_POWER_DOMAINS, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_DC_OFF, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc xehpd_power_wells_dc_off[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &xehpd_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + } +}; + +static const struct i915_power_well_desc_list xehpd_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xehpd_power_wells_dc_off), I915_PW_DESCRIPTORS(xelpd_power_wells_main), }; @@ -1423,15 +1467,6 @@ I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2, XELPDP_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); -I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off, - XELPDP_PW_2_POWER_DOMAINS, - POWER_DOMAIN_AUDIO_MMIO, - POWER_DOMAIN_MODESET, - POWER_DOMAIN_AUX_A, - POWER_DOMAIN_AUX_B, - POWER_DOMAIN_DC_OFF, - POWER_DOMAIN_INIT); - I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1, POWER_DOMAIN_AUX_USBC1, POWER_DOMAIN_AUX_TBT1); @@ -1451,12 +1486,6 @@ I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4, static const struct i915_power_well_desc xelpdp_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( - I915_PW("DC_off", &xelpdp_pwdoms_dc_off, - .id = SKL_DISP_DC_OFF), - ), - .ops = &gen9_dc_off_power_well_ops, - }, { - .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &xelpdp_pwdoms_pw_2, .hsw.idx = ICL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), @@ -1512,6 +1541,7 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = { static const struct i915_power_well_desc_list xelpdp_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off), I915_PW_DESCRIPTORS(xelpdp_power_wells_main), }; @@ -1624,6 +1654,8 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) if (DISPLAY_VER(i915) >= 14) return 
set_power_wells(power_domains, xelpdp_power_wells); + else if (IS_DG2(i915)) + return set_power_wells(power_domains, xehpd_power_wells); else if (DISPLAY_VER(i915) >= 13) return set_power_wells(power_domains, xelpd_power_wells); else if (IS_DG1(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 62bafcbc7937..916009894d89 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -5,11 +5,13 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "i915_reg.h" #include "intel_backlight_regs.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_crt.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dkl_phy.h" @@ -253,6 +255,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + int timeout = power_well->desc->enable_timeout ? : 1; /* * For some power wells we're not supposed to watch the status bit for @@ -266,7 +269,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ if (intel_de_wait_for_set(dev_priv, regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { + HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) { drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", intel_power_well_name(power_well)); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index ba7cb977e7c7..e494df379e6c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -110,6 +110,8 @@ struct i915_power_well_desc { * Thunderbolt mode. */ u16 is_tc_tbt:1; + /* Enable timeout if greater than the default 1ms */ + u16 enable_timeout; }; struct i915_power_well { diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h index 755c1ea8225c..2f07b7afa3bf 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h +++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h @@ -8,7 +8,7 @@ #include "i915_reg_defs.h" -#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display.mmio_offset) +#define DISPLAY_MMIO_BASE(dev_priv) (DISPLAY_INFO(dev_priv)->mmio_offset) #define VLV_DISPLAY_BASE 0x180000 @@ -36,14 +36,14 @@ * Device info offset array based helpers for groups of registers with unevenly * spaced base offsets. 
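A standalone illustration of the offset-array scheme the comment above describes, and which the _MMIO_PIPE2()/_MMIO_TRANS2()/_MMIO_CURSOR2() macros implement: the per-pipe address is the PIPE_A-relative register plus the difference between that pipe's base offset and PIPE_A's. The offsets and register value below are made up for the example, and the MMIO base is omitted.

#include <stdio.h>

static const unsigned int pipe_offsets[] = { 0x70000, 0x71000, 0x74000 };

/* 'reg' is a PIPE_A-based register offset. */
static unsigned int example_pipe_reg(unsigned int pipe, unsigned int reg)
{
	return reg - pipe_offsets[0] + pipe_offsets[pipe];
}

int main(void)
{
	/* A PIPE_A register at 0x70008 lands at 0x71008 for the second pipe
	 * and at 0x74008 for the unevenly spaced third pipe. */
	printf("0x%x 0x%x\n",
	       example_pipe_reg(1, 0x70008),
	       example_pipe_reg(2, 0x70008));
	return 0;
}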
*/ -#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->display.pipe_offsets[(pipe)] - \ - INTEL_INFO(dev_priv)->display.pipe_offsets[PIPE_A] + \ +#define _MMIO_PIPE2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->pipe_offsets[(pipe)] - \ + DISPLAY_INFO(dev_priv)->pipe_offsets[PIPE_A] + \ DISPLAY_MMIO_BASE(dev_priv) + (reg)) -#define _MMIO_TRANS2(tran, reg) _MMIO(INTEL_INFO(dev_priv)->display.trans_offsets[(tran)] - \ - INTEL_INFO(dev_priv)->display.trans_offsets[TRANSCODER_A] + \ +#define _MMIO_TRANS2(tran, reg) _MMIO(DISPLAY_INFO(dev_priv)->trans_offsets[(tran)] - \ + DISPLAY_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + \ DISPLAY_MMIO_BASE(dev_priv) + (reg)) -#define _MMIO_CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->display.cursor_offsets[(pipe)] - \ - INTEL_INFO(dev_priv)->display.cursor_offsets[PIPE_A] + \ +#define _MMIO_CURSOR2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->cursor_offsets[(pipe)] - \ + DISPLAY_INFO(dev_priv)->cursor_offsets[PIPE_A] + \ DISPLAY_MMIO_BASE(dev_priv) + (reg)) #endif /* __INTEL_DISPLAY_REG_DEFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c new file mode 100644 index 000000000000..17178d5d7788 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_reset.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_atomic_helper.h> + +#include "i915_drv.h" +#include "intel_clock_gating.h" +#include "intel_display_driver.h" +#include "intel_display_reset.h" +#include "intel_display_types.h" +#include "intel_hotplug.h" +#include "intel_pps.h" + +static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) +{ + return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && + intel_has_gpu_reset(to_gt(dev_priv))); +} + +void intel_display_reset_prepare(struct drm_i915_private *dev_priv) +{ + struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx; + struct drm_atomic_state *state; + int ret; + + if (!HAS_DISPLAY(dev_priv)) + return; + + /* reset doesn't touch the display */ + if (!dev_priv->params.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) + return; + + /* We have a modeset vs reset deadlock, defensively unbreak it. */ + set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags); + smp_mb__after_atomic(); + wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET); + + if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { + drm_dbg_kms(&dev_priv->drm, + "Modeset potentially stuck, unbreaking through wedging\n"); + intel_gt_set_wedged(to_gt(dev_priv)); + } + + /* + * Need mode_config.mutex so that we don't + * trample ongoing ->detect() and whatnot. + */ + mutex_lock(&dev_priv->drm.mode_config.mutex); + drm_modeset_acquire_init(ctx, 0); + while (1) { + ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx); + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(ctx); + } + /* + * Disabling the crtcs gracefully seems nicer. Also the + * g33 docs say we should at least disable all the planes. 
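As a hedged usage sketch (not part of this patch): a reset path is expected to bracket the actual GPU reset with the prepare/finish pair that intel_display_reset.c introduces. The surrounding function and the reset step here are placeholders.

static void example_gpu_reset(struct drm_i915_private *i915)
{
	intel_display_reset_prepare(i915);

	/* ... perform the actual engine/GT reset here ... */

	intel_display_reset_finish(i915);
}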
+ */ + state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + drm_err(&dev_priv->drm, "Duplicating state failed with %i\n", + ret); + return; + } + + ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx); + if (ret) { + drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", + ret); + drm_atomic_state_put(state); + return; + } + + dev_priv->display.restore.modeset_state = state; + state->acquire_ctx = ctx; +} + +void intel_display_reset_finish(struct drm_i915_private *i915) +{ + struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx; + struct drm_atomic_state *state; + int ret; + + if (!HAS_DISPLAY(i915)) + return; + + /* reset doesn't touch the display */ + if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags)) + return; + + state = fetch_and_zero(&i915->display.restore.modeset_state); + if (!state) + goto unlock; + + /* reset doesn't touch the display */ + if (!gpu_reset_clobbers_display(i915)) { + /* for testing only restore the display */ + ret = drm_atomic_helper_commit_duplicated_state(state, ctx); + if (ret) { + drm_WARN_ON(&i915->drm, ret == -EDEADLK); + drm_err(&i915->drm, + "Restoring old state failed with %i\n", ret); + } + } else { + /* + * The display has been reset as well, + * so need a full re-initialization. + */ + intel_pps_unlock_regs_wa(i915); + intel_display_driver_init_hw(i915); + intel_clock_gating_init(i915); + intel_hpd_init(i915); + + ret = __intel_display_driver_resume(i915, state, ctx); + if (ret) + drm_err(&i915->drm, + "Restoring old state failed with %i\n", ret); + + intel_hpd_poll_disable(i915); + } + + drm_atomic_state_put(state); +unlock: + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); + mutex_unlock(&i915->drm.mode_config.mutex); + + clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.h b/drivers/gpu/drm/i915/display/intel_display_reset.h new file mode 100644 index 000000000000..f06d0d35b86b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_reset.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_RESET_H__ +#define __INTEL_RESET_H__ + +struct drm_i915_private; + +void intel_display_reset_prepare(struct drm_i915_private *i915); +void intel_display_reset_finish(struct drm_i915_private *i915); + +#endif /* __INTEL_RESET_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h index 651ea8564e1b..99bdb833591c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_trace.h +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -14,7 +14,6 @@ #include <linux/tracepoint.h> #include "i915_drv.h" -#include "i915_irq.h" #include "intel_crtc.h" #include "intel_display_types.h" #include "intel_vblank.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 47395b39c8f4..731f2ec04d5c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -233,14 +233,26 @@ struct intel_encoder { * Called during system suspend after all pending requests for the * encoder are flushed (for example for DP AUX transactions) and * device interrupts are disabled. + * All modeset locks are held while the hook is called. 
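A minimal sketch of how an encoder might pair the existing hooks with the new *_complete hooks documented here; the function names and bodies are hypothetical, only the struct members come from this patch.

static void example_encoder_suspend(struct intel_encoder *encoder)
{
	/* runs with all modeset locks held */
}

static void example_encoder_suspend_complete(struct intel_encoder *encoder)
{
	/* runs after every encoder's suspend() hook, without modeset locks */
}

static void example_encoder_init(struct intel_encoder *encoder)
{
	encoder->suspend = example_encoder_suspend;
	encoder->suspend_complete = example_encoder_suspend_complete;
}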
*/ void (*suspend)(struct intel_encoder *); /* + * Called without the modeset locks held after the suspend() hook for + * all encoders have been called. + */ + void (*suspend_complete)(struct intel_encoder *encoder); + /* * Called during system reboot/shutdown after all the * encoders have been disabled and suspended. + * All modeset locks are held while the hook is called. */ void (*shutdown)(struct intel_encoder *encoder); /* + * Called without the modeset locks held after the shutdown() hook for + * all encoders have been called. + */ + void (*shutdown_complete)(struct intel_encoder *encoder); + /* * Enable/disable the clock to the port. */ void (*enable_clock)(struct intel_encoder *encoder, @@ -643,6 +655,9 @@ struct intel_atomic_state { struct __intel_global_objs_state *global_objs; int num_global_objs; + /* Internal commit, as opposed to userspace/client initiated one */ + bool internal; + bool dpll_set, modeset; struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; @@ -980,6 +995,38 @@ struct intel_link_m_n { u32 link_n; }; +struct intel_csc_matrix { + u16 coeff[9]; + u16 preoff[3]; + u16 postoff[3]; +}; + +struct intel_c10pll_state { + u32 clock; /* in KHz */ + u8 tx; + u8 cmn; + u8 pll[20]; +}; + +struct intel_c20pll_state { + u32 link_bit_rate; + u32 clock; /* in kHz */ + u16 tx[3]; + u16 cmn[4]; + union { + u16 mplla[10]; + u16 mpllb[11]; + }; +}; + +struct intel_cx0pll_state { + union { + struct intel_c10pll_state c10; + struct intel_c20pll_state c20; + }; + bool ssc_enabled; +}; + struct intel_crtc_state { /* * uapi (drm) state. This is the software state shown to userspace. @@ -1021,6 +1068,8 @@ struct intel_crtc_state { /* actual state of LUTs */ struct drm_property_blob *pre_csc_lut, *post_csc_lut; + struct intel_csc_matrix csc, output_csc; + /** * quirks - bitfield with hw state readout quirks * @@ -1123,6 +1172,7 @@ struct intel_crtc_state { union { struct intel_dpll_hw_state dpll_hw_state; struct intel_mpllb_state mpllb_state; + struct intel_cx0pll_state cx0pll_state; }; /* @@ -1275,15 +1325,27 @@ struct intel_crtc_state { /* HDMI High TMDS char rate ratio */ bool hdmi_high_tmds_clock_ratio; - /* Output format RGB/YCBCR etc */ + /* + * Output format RGB/YCBCR etc., that is coming out + * at the end of the pipe. + */ enum intel_output_format output_format; + /* + * Sink output format RGB/YCBCR etc., that is going + * into the sink. + */ + enum intel_output_format sink_format; + /* enable pipe gamma? */ bool gamma_enable; /* enable pipe csc? */ bool csc_enable; + /* enable vlv/chv wgc csc? 
*/ + bool wgc_enable; + /* big joiner pipe bitmask */ u8 bigjoiner_pipes; @@ -1524,8 +1586,6 @@ struct intel_hdmi { enum drm_dp_dual_mode_type type; int max_tmds_clock; } dp_dual_mode; - bool has_hdmi_sink; - bool has_audio; struct intel_connector *attached_connector; struct cec_notifier *cec_notifier; }; @@ -1645,8 +1705,6 @@ struct intel_dp { u8 lane_count; u8 sink_count; bool link_trained; - bool has_hdmi_sink; - bool has_audio; bool reset_link_params; bool use_max_params; u8 dpcd[DP_RECEIVER_CAP_SIZE]; @@ -1728,6 +1786,7 @@ struct intel_dp { int pcon_max_frl_bw; u8 max_bpc; bool ycbcr_444_to_420; + bool ycbcr420_passthrough; bool rgb_to_ycbcr; } dfp; @@ -1814,10 +1873,6 @@ struct intel_dp_mst_encoder { struct intel_connector *connector; }; -struct intel_load_detect_pipe { - struct drm_atomic_state *restore_state; -}; - static inline struct intel_encoder * intel_attached_encoder(struct intel_connector *connector) { diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c index 57cc3edba016..a001232ad445 100644 --- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c @@ -11,6 +11,15 @@ #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" +/** + * intel_dkl_phy_init - initialize Dekel PHY + * @i915: i915 device instance + */ +void intel_dkl_phy_init(struct drm_i915_private *i915) +{ + spin_lock_init(&i915->display.dkl.phy_lock); +} + static void dkl_phy_set_hip_idx(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg) { diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h index 570ee36f9386..5956ec3e940b 100644 --- a/drivers/gpu/drm/i915/display/intel_dkl_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h @@ -12,6 +12,7 @@ struct drm_i915_private; +void intel_dkl_phy_init(struct drm_i915_private *i915); u32 intel_dkl_phy_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg); void diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 529ee22be872..f4192fda1a76 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -44,8 +44,8 @@ #include <drm/drm_probe_helper.h> #include "g4x_dp.h" -#include "i915_debugfs.h" #include "i915_drv.h" +#include "i915_irq.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_audio.h" @@ -53,6 +53,7 @@ #include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" +#include "intel_cx0_phy.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" @@ -67,6 +68,7 @@ #include "intel_hdcp.h" #include "intel_hdmi.h" #include "intel_hotplug.h" +#include "intel_hotplug_irq.h" #include "intel_lspcon.h" #include "intel_lvds.h" #include "intel_panel.h" @@ -421,6 +423,18 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp) return 810000; } +static int mtl_max_source_rate(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + if (intel_is_c10phy(i915, phy)) + return intel_dp_is_edp(intel_dp) ? 
675000 : 810000; + + return 2000000; +} + static int vbt_max_link_rate(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; @@ -445,6 +459,10 @@ static void intel_dp_set_source_rates(struct intel_dp *intel_dp) { /* The values must be in increasing order */ + static const int mtl_rates[] = { + 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, + 810000, 1000000, 1350000, 2000000, + }; static const int icl_rates[] = { 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, 1000000, 1350000, @@ -470,7 +488,11 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) drm_WARN_ON(&dev_priv->drm, intel_dp->source_rates || intel_dp->num_source_rates); - if (DISPLAY_VER(dev_priv) >= 11) { + if (DISPLAY_VER(dev_priv) >= 14) { + source_rates = mtl_rates; + size = ARRAY_SIZE(mtl_rates); + max_rate = mtl_max_source_rate(intel_dp); + } else if (DISPLAY_VER(dev_priv) >= 11) { source_rates = icl_rates; size = ARRAY_SIZE(icl_rates); if (IS_DG2(dev_priv)) @@ -828,26 +850,88 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } +static bool source_can_output(struct intel_dp *intel_dp, + enum intel_output_format format) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + switch (format) { + case INTEL_OUTPUT_FORMAT_RGB: + return true; + + case INTEL_OUTPUT_FORMAT_YCBCR444: + /* + * No YCbCr output support on gmch platforms. + * Also, ILK doesn't seem capable of DP YCbCr output. + * The displayed image is severly corrupted. SNB+ is fine. + */ + return !HAS_GMCH(i915) && !IS_IRONLAKE(i915); + + case INTEL_OUTPUT_FORMAT_YCBCR420: + /* Platform < Gen 11 cannot output YCbCr420 format */ + return DISPLAY_VER(i915) >= 11; + + default: + MISSING_CASE(format); + return false; + } +} + +static bool +dfp_can_convert_from_rgb(struct intel_dp *intel_dp, + enum intel_output_format sink_format) +{ + if (!drm_dp_is_branch(intel_dp->dpcd)) + return false; + + if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) + return intel_dp->dfp.rgb_to_ycbcr; + + if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) + return intel_dp->dfp.rgb_to_ycbcr && + intel_dp->dfp.ycbcr_444_to_420; + + return false; +} + +static bool +dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp, + enum intel_output_format sink_format) +{ + if (!drm_dp_is_branch(intel_dp->dpcd)) + return false; + + if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) + return intel_dp->dfp.ycbcr_444_to_420; + + return false; +} + static enum intel_output_format intel_dp_output_format(struct intel_connector *connector, - bool ycbcr_420_output) + enum intel_output_format sink_format) { struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + enum intel_output_format output_format; if (intel_dp->force_dsc_output_format) return intel_dp->force_dsc_output_format; - if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output) - return INTEL_OUTPUT_FORMAT_RGB; + if (sink_format == INTEL_OUTPUT_FORMAT_RGB || + dfp_can_convert_from_rgb(intel_dp, sink_format)) + output_format = INTEL_OUTPUT_FORMAT_RGB; - if (intel_dp->dfp.rgb_to_ycbcr && - intel_dp->dfp.ycbcr_444_to_420) - return INTEL_OUTPUT_FORMAT_RGB; + else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 || + dfp_can_convert_from_ycbcr444(intel_dp, sink_format)) + output_format = INTEL_OUTPUT_FORMAT_YCBCR444; - if (intel_dp->dfp.ycbcr_444_to_420) - return INTEL_OUTPUT_FORMAT_YCBCR444; else - return INTEL_OUTPUT_FORMAT_YCBCR420; + output_format = INTEL_OUTPUT_FORMAT_YCBCR420; + + 
drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format)); + + return output_format; } int intel_dp_min_bpp(enum intel_output_format output_format) @@ -871,13 +955,27 @@ static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) return bpp; } +static enum intel_output_format +intel_dp_sink_format(struct intel_connector *connector, + const struct drm_display_mode *mode) +{ + const struct drm_display_info *info = &connector->base.display_info; + + if (drm_mode_is_420_only(info, mode)) + return INTEL_OUTPUT_FORMAT_YCBCR420; + + return INTEL_OUTPUT_FORMAT_RGB; +} + static int intel_dp_mode_min_output_bpp(struct intel_connector *connector, const struct drm_display_mode *mode) { - const struct drm_display_info *info = &connector->base.display_info; - enum intel_output_format output_format = - intel_dp_output_format(connector, drm_mode_is_420_only(info, mode)); + enum intel_output_format output_format, sink_format; + + sink_format = intel_dp_sink_format(connector, mode); + + output_format = intel_dp_output_format(connector, sink_format); return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); } @@ -916,7 +1014,8 @@ static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp) static enum drm_mode_status intel_dp_tmds_clock_valid(struct intel_dp *intel_dp, - int clock, int bpc, bool ycbcr420_output, + int clock, int bpc, + enum intel_output_format sink_format, bool respect_downstream_limits) { int tmds_clock, min_tmds_clock, max_tmds_clock; @@ -924,7 +1023,7 @@ intel_dp_tmds_clock_valid(struct intel_dp *intel_dp, if (!respect_downstream_limits) return MODE_OK; - tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output); + tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); min_tmds_clock = intel_dp->dfp.min_tmds_clock; max_tmds_clock = intel_dp_max_tmds_clock(intel_dp); @@ -946,7 +1045,7 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, struct intel_dp *intel_dp = intel_attached_dp(connector); const struct drm_display_info *info = &connector->base.display_info; enum drm_mode_status status; - bool ycbcr_420_only; + enum intel_output_format sink_format; /* If PCON supports FRL MODE, check FRL bandwidth constraints */ if (intel_dp->dfp.pcon_max_frl_bw) { @@ -971,20 +1070,20 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, target_clock > intel_dp->dfp.max_dotclock) return MODE_CLOCK_HIGH; - ycbcr_420_only = drm_mode_is_420_only(info, mode); + sink_format = intel_dp_sink_format(connector, mode); /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ status = intel_dp_tmds_clock_valid(intel_dp, target_clock, - 8, ycbcr_420_only, true); + 8, sink_format, true); if (status != MODE_OK) { - if (ycbcr_420_only || + if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || !connector->base.ycbcr_420_allowed || !drm_mode_is_420_also(info, mode)) return status; - + sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; status = intel_dp_tmds_clock_valid(intel_dp, target_clock, - 8, true, true); + 8, sink_format, true); if (status != MODE_OK) return status; } @@ -1041,6 +1140,9 @@ intel_dp_mode_valid(struct drm_connector *_connector, if (target_clock > max_dotclk) return MODE_CLOCK_HIGH; + if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) + return MODE_H_ILLEGAL; + max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); @@ -1048,13 +1150,6 @@ intel_dp_mode_valid(struct drm_connector *_connector, mode_rate = intel_dp_link_required(target_clock, 
intel_dp_mode_min_output_bpp(connector, mode)); - if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) - return MODE_H_ILLEGAL; - - /* - * Output bpp is stored in 6.4 format so right shift by 4 to get the - * integer value since we support only integer values of bpp. - */ if (HAS_DSC(dev_priv) && drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { /* @@ -1063,6 +1158,10 @@ intel_dp_mode_valid(struct drm_connector *_connector, */ int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX); + /* + * Output bpp is stored in 6.4 format so right shift by 4 to get the + * integer value since we support only integer values of bpp. + */ if (intel_dp_is_edp(intel_dp)) { dsc_max_output_bpp = drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; @@ -1188,6 +1287,13 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, } } +bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = intel_dp->attached_connector; + + return connector->base.display_info.is_hdmi; +} + static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { @@ -1220,19 +1326,10 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); } -static bool intel_dp_is_ycbcr420(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || - (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && - intel_dp->dfp.ycbcr_444_to_420); -} - static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int bpc, bool respect_downstream_limits) { - bool ycbcr420_output = intel_dp_is_ycbcr420(intel_dp, crtc_state); int clock = crtc_state->hw.adjusted_mode.crtc_clock; /* @@ -1252,8 +1349,8 @@ static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp, for (; bpc >= 8; bpc -= 2) { if (intel_hdmi_bpc_possible(crtc_state, bpc, - intel_dp->has_hdmi_sink, ycbcr420_output) && - intel_dp_tmds_clock_valid(intel_dp, clock, bpc, ycbcr420_output, + intel_dp_has_hdmi_sink(intel_dp)) && + intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format, respect_downstream_limits) == MODE_OK) return bpc; } @@ -2069,6 +2166,7 @@ static bool intel_dp_has_audio(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_connector *connector = intel_dp->attached_connector; const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); @@ -2076,7 +2174,7 @@ static bool intel_dp_has_audio(struct intel_encoder *encoder, return false; if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - return intel_dp->has_audio; + return connector->base.display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } @@ -2097,23 +2195,27 @@ intel_dp_compute_output_format(struct intel_encoder *encoder, ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); - crtc_state->output_format = intel_dp_output_format(connector, ycbcr_420_only); - - if (ycbcr_420_only && !intel_dp_is_ycbcr420(intel_dp, crtc_state)) { + if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) { drm_dbg_kms(&i915->drm, "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. 
Falling back to RGB.\n"); - crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; + crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; + } else { + crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode); } + crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format); + ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, respect_downstream_limits); if (ret) { - if (intel_dp_is_ycbcr420(intel_dp, crtc_state) || + if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || !connector->base.ycbcr_420_allowed || !drm_mode_is_420_also(info, adjusted_mode)) return ret; - crtc_state->output_format = intel_dp_output_format(connector, true); + crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; + crtc_state->output_format = intel_dp_output_format(connector, + crtc_state->sink_format); ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, respect_downstream_limits); } @@ -2637,7 +2739,7 @@ frl_trained: static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) { if (drm_dp_is_branch(intel_dp->dpcd) && - intel_dp->has_hdmi_sink && + intel_dp_has_hdmi_sink(intel_dp) && intel_dp_hdmi_sink_max_frl(intel_dp) > 0) return true; @@ -2795,6 +2897,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + bool ycbcr444_to_420 = false; + bool rgb_to_ycbcr = false; u8 tmp; if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) @@ -2803,16 +2907,42 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, if (!drm_dp_is_branch(intel_dp->dpcd)) return; - tmp = intel_dp->has_hdmi_sink ? - DP_HDMI_DVI_OUTPUT_CONFIG : 0; + tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n", - str_enable_disable(intel_dp->has_hdmi_sink)); + str_enable_disable(intel_dp_has_hdmi_sink(intel_dp))); - tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && - intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; + if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + switch (crtc_state->output_format) { + case INTEL_OUTPUT_FORMAT_YCBCR420: + break; + case INTEL_OUTPUT_FORMAT_YCBCR444: + ycbcr444_to_420 = true; + break; + case INTEL_OUTPUT_FORMAT_RGB: + rgb_to_ycbcr = true; + ycbcr444_to_420 = true; + break; + default: + MISSING_CASE(crtc_state->output_format); + break; + } + } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) { + switch (crtc_state->output_format) { + case INTEL_OUTPUT_FORMAT_YCBCR444: + break; + case INTEL_OUTPUT_FORMAT_RGB: + rgb_to_ycbcr = true; + break; + default: + MISSING_CASE(crtc_state->output_format); + break; + } + } + + tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) @@ -2820,13 +2950,12 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", str_enable_disable(intel_dp->dfp.ycbcr_444_to_420)); - tmp = intel_dp->dfp.rgb_to_ycbcr ? - DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; + tmp = rgb_to_ycbcr ? 
DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) drm_dbg_kms(&i915->drm, - "Failed to %s protocol converter RGB->YCbCr conversion mode\n", - str_enable_disable(tmp)); + "Failed to %s protocol converter RGB->YCbCr conversion mode\n", + str_enable_disable(tmp)); } bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) @@ -4110,9 +4239,9 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp, return false; } -static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, - struct drm_modeset_acquire_ctx *ctx, - u8 *pipe_mask) +int intel_dp_get_active_pipes(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u8 *pipe_mask) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector_list_iter conn_iter; @@ -4121,9 +4250,6 @@ static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, *pipe_mask = 0; - if (!intel_dp_needs_link_retrain(intel_dp)) - return 0; - drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { struct drm_connector_state *conn_state = @@ -4157,9 +4283,6 @@ static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, } drm_connector_list_iter_end(&conn_iter); - if (!intel_dp_needs_link_retrain(intel_dp)) - *pipe_mask = 0; - return ret; } @@ -4188,13 +4311,19 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, if (ret) return ret; - ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask); + if (!intel_dp_needs_link_retrain(intel_dp)) + return 0; + + ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); if (ret) return ret; if (pipe_mask == 0) return 0; + if (!intel_dp_needs_link_retrain(intel_dp)) + return 0; + drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", encoder->base.base.id, encoder->base.name); @@ -4616,57 +4745,44 @@ intel_dp_update_dfp(struct intel_dp *intel_dp, intel_dp_get_pcon_dsc_cap(intel_dp); } +static bool +intel_dp_can_ycbcr420(struct intel_dp *intel_dp) +{ + if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) && + (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough)) + return true; + + if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) && + dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420)) + return true; + + if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) && + dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420)) + return true; + + return false; +} + static void intel_dp_update_420(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr; - - /* No YCbCr output support on gmch platforms */ - if (HAS_GMCH(i915)) - return; - - /* - * ILK doesn't seem capable of DP YCbCr output. The - * displayed image is severly corrupted. SNB+ is fine. 
- */ - if (IS_IRONLAKE(i915)) - return; - is_branch = drm_dp_is_branch(intel_dp->dpcd); - ycbcr_420_passthrough = + intel_dp->dfp.ycbcr420_passthrough = drm_dp_downstream_420_passthrough(intel_dp->dpcd, intel_dp->downstream_ports); /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ - ycbcr_444_to_420 = + intel_dp->dfp.ycbcr_444_to_420 = dp_to_dig_port(intel_dp)->lspcon.active || drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, intel_dp->downstream_ports); - rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, - intel_dp->downstream_ports, - DP_DS_HDMI_BT709_RGB_YCBCR_CONV); - - if (DISPLAY_VER(i915) >= 11) { - /* Let PCON convert from RGB->YCbCr if possible */ - if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) { - intel_dp->dfp.rgb_to_ycbcr = true; - intel_dp->dfp.ycbcr_444_to_420 = true; - connector->base.ycbcr_420_allowed = true; - } else { - /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ - intel_dp->dfp.ycbcr_444_to_420 = - ycbcr_444_to_420 && !ycbcr_420_passthrough; - - connector->base.ycbcr_420_allowed = - !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; - } - } else { - /* 4:4:4->4:2:0 conversion is the only way */ - intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; + intel_dp->dfp.rgb_to_ycbcr = + drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, + intel_dp->downstream_ports, + DP_DS_HDMI_BT709_RGB_YCBCR_CONV); - connector->base.ycbcr_420_allowed = ycbcr_444_to_420; - } + connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", @@ -4702,10 +4818,6 @@ intel_dp_set_edid(struct intel_dp *intel_dp) /* FIXME: Get rid of drm_edid_raw() */ edid = drm_edid_raw(drm_edid); - if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { - intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); - intel_dp->has_audio = drm_detect_monitor_audio(edid); - } drm_dp_cec_set_edid(&intel_dp->aux, edid); } @@ -4719,9 +4831,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) drm_edid_free(connector->detect_edid); connector->detect_edid = NULL; - intel_dp->has_hdmi_sink = false; - intel_dp->has_audio = false; - intel_dp->dfp.max_bpc = 0; intel_dp->dfp.max_dotclock = 0; intel_dp->dfp.min_tmds_clock = 0; @@ -5370,6 +5479,15 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, goto out_vdd_off; } + /* + * Enable HPD sense for live status check. + * intel_hpd_irq_setup() will turn it off again + * if it's no longer needed later. + * + * The DPCD probe below will make sure VDD is on. + */ + intel_hpd_enable_detection(encoder); + /* Cache DPCD and EDID for edp. */ has_dpcd = intel_edp_init_dpcd(intel_dp); @@ -5381,6 +5499,24 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, goto out_vdd_off; } + /* + * VBT and straps are liars. Also check HPD as that seems + * to be the most reliable piece of information available. + */ + if (!intel_digital_port_connected(encoder)) { + /* + * If this fails, presume the DPCD answer came + * from some other port using the same AUX CH. + * + * FIXME maybe cleaner to check this before the + * DPCD read? Would need sort out the VDD handling... 
+ */ + drm_info(&dev_priv->drm, + "[ENCODER:%d:%s] HPD is down, disabling eDP\n", + encoder->base.base.id, encoder->base.name); + goto out_vdd_off; + } + mutex_lock(&dev_priv->drm.mode_config.mutex); drm_edid = drm_edid_read_ddc(connector, &intel_dp->aux.ddc); if (!drm_edid) { @@ -5583,6 +5719,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, return true; fail: + intel_display_power_flush_work(dev_priv); drm_connector_cleanup(connector); return false; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index ef39e4f7a329..22099de3ca45 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -42,6 +42,9 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, int link_rate, int lane_count); int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, u8 lane_count); +int intel_dp_get_active_pipes(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u8 *pipe_mask); int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); @@ -62,6 +65,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, struct link_config_limits *limits, int timeslots, bool recompute_pipe_bpp); +bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp); bool intel_dp_is_edp(struct intel_dp *intel_dp); bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state); bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 705915d50565..197c6e81db14 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -129,7 +129,7 @@ static int intel_dp_aux_sync_len(void) static int intel_dp_aux_fw_sync_len(void) { - int precharge = 16; /* 10-16 */ + int precharge = 10; /* 10-16 */ int preamble = 8; return precharge + preamble; @@ -161,14 +161,14 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, timeout = DP_AUX_CH_CTL_TIME_OUT_400us; return DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_INTERRUPT | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - timeout | - DP_AUX_CH_CTL_RECEIVE_ERROR | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_INTERRUPT | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + timeout | + DP_AUX_CH_CTL_RECEIVE_ERROR | + DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) | + DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) | + DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider); } static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, @@ -185,14 +185,14 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, * ICL+: 4ms */ ret = DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_INTERRUPT | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_TIME_OUT_MAX | - DP_AUX_CH_CTL_RECEIVE_ERROR | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) | - DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len()); + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_INTERRUPT | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_TIME_OUT_MAX | + DP_AUX_CH_CTL_RECEIVE_ERROR | + DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) | + 
DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len()); if (intel_tc_port_in_tbt_alt_mode(dig_port)) ret |= DP_AUX_CH_CTL_TBT_IO; @@ -268,6 +268,11 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, intel_pps_check_power_unlocked(intel_dp); + /* + * FIXME PSR should be disabled here to prevent + * it using the same AUX CH simultaneously + */ + /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { status = intel_de_read_notrace(i915, ch_ctl); @@ -373,8 +378,7 @@ done: } /* Unload any bytes sent back from the other side */ - recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> - DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); + recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status); /* * By BSpec: "Message sizes of 0 or >20 are not allowed." @@ -810,3 +814,8 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) return aux_ch; } + +void intel_dp_aux_irq_handler(struct drm_i915_private *i915) +{ + wake_up_all(&i915->display.gmbus.wait_queue); +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 138e340f94ee..5b608f9d3499 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -7,6 +7,7 @@ #define __INTEL_DP_AUX_H__ enum aux_ch; +struct drm_i915_private; struct intel_dp; struct intel_encoder; @@ -15,4 +16,6 @@ void intel_dp_aux_init(struct intel_dp *intel_dp); enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); +void intel_dp_aux_irq_handler(struct drm_i915_private *i915); + #endif /* __INTEL_DP_AUX_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h index 5702f318d537..5185345277c7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h @@ -50,35 +50,37 @@ _XELPDP_USBC3_AUX_CH_DATA1, \ _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4) -#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) -#define DP_AUX_CH_CTL_DONE (1 << 30) -#define DP_AUX_CH_CTL_INTERRUPT (1 << 29) -#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) -#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) -#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) -#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) -#define DP_AUX_CH_CTL_TIME_OUT_MAX (3 << 26) /* Varies per platform */ -#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) -#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) -#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) -#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 -#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19) -#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18) -#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) -#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 -#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) -#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) -#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) -#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) -#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) -#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) -#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 -#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) -#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) -#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) -#define DP_AUX_CH_CTL_TBT_IO (1 << 11) -#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5) -#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) -#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) +#define DP_AUX_CH_CTL_SEND_BUSY REG_BIT(31) +#define DP_AUX_CH_CTL_DONE REG_BIT(30) +#define DP_AUX_CH_CTL_INTERRUPT 
REG_BIT(29) +#define DP_AUX_CH_CTL_TIME_OUT_ERROR REG_BIT(28) + +#define DP_AUX_CH_CTL_TIME_OUT_MASK REG_GENMASK(27, 26) +#define DP_AUX_CH_CTL_TIME_OUT_400us REG_FIELD_PREP(DP_AUX_CH_CTL_TIME_OUT_MASK, 0) +#define DP_AUX_CH_CTL_TIME_OUT_600us REG_FIELD_PREP(DP_AUX_CH_CTL_TIME_OUT_MASK, 1) +#define DP_AUX_CH_CTL_TIME_OUT_800us REG_FIELD_PREP(DP_AUX_CH_CTL_TIME_OUT_MASK, 2) +#define DP_AUX_CH_CTL_TIME_OUT_MAX REG_FIELD_PREP(DP_AUX_CH_CTL_TIME_OUT_MASK, 3) /* Varies per platform */ +#define DP_AUX_CH_CTL_RECEIVE_ERROR REG_BIT(25) +#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK REG_GENMASK(24, 20) +#define DP_AUX_CH_CTL_MESSAGE_SIZE(x) REG_FIELD_PREP(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, (x)) +#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK REG_GENMASK(19, 16) /* pre-skl */ +#define DP_AUX_CH_CTL_PRECHARGE_2US(x) REG_FIELD_PREP(DP_AUX_CH_CTL_PRECHARGE_2US_MASK, (x)) +#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19) /* mtl+ */ +#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18) /* mtl+ */ +#define DP_AUX_CH_CTL_AUX_AKSV_SELECT REG_BIT(15) +#define DP_AUX_CH_CTL_MANCHESTER_TEST REG_BIT(14) /* pre-hsw */ +#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL REG_BIT(14) /* skl+ */ +#define DP_AUX_CH_CTL_SYNC_TEST REG_BIT(13) /* pre-hsw */ +#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL REG_BIT(13) /* skl+ */ +#define DP_AUX_CH_CTL_DEGLITCH_TEST REG_BIT(12) /* pre-hsw */ +#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL REG_BIT(12) /* skl+ */ +#define DP_AUX_CH_CTL_PRECHARGE_TEST REG_BIT(11) /* pre-hsw */ +#define DP_AUX_CH_CTL_TBT_IO REG_BIT(11) /* icl+ */ +#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK REG_GENMASK(10, 0) /* pre-skl */ +#define DP_AUX_CH_CTL_BIT_CLOCK_2X(x) REG_FIELD_PREP(DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK, (x)) +#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK REG_GENMASK(9, 5) /* skl+ */ +#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK, (c) - 1) +#define DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK REG_GENMASK(4, 0) /* skl+ */ +#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK, (c) - 1) #endif /* __INTEL_DP_AUX_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index d638054c74ac..0952a707358c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -26,6 +26,27 @@ #include "intel_dp.h" #include "intel_dp_link_training.h" +#define LT_MSG_PREFIX "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] " +#define LT_MSG_ARGS(_intel_dp, _dp_phy) (_intel_dp)->attached_connector->base.base.id, \ + (_intel_dp)->attached_connector->base.name, \ + dp_to_dig_port(_intel_dp)->base.base.base.id, \ + dp_to_dig_port(_intel_dp)->base.base.name, \ + drm_dp_phy_name(_dp_phy) + +#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \ + drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \ + LT_MSG_PREFIX _format, \ + LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__) + +#define lt_err(_intel_dp, _dp_phy, _format, ...) 
do { \ + if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \ + drm_err(&dp_to_i915(_intel_dp)->drm, \ + LT_MSG_PREFIX _format, \ + LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \ + else \ + lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \ +} while (0) + static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) { memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); @@ -47,29 +68,21 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE], enum drm_dp_phy dp_phy) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) { - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[ENCODER:%d:%s][%s] failed to read the PHY caps\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n"); return; } - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - (int)sizeof(intel_dp->lttpr_phy_caps[0]), - phy_caps); + lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n", + (int)sizeof(intel_dp->lttpr_phy_caps[0]), + phy_caps); } static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int ret; ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd, @@ -77,11 +90,9 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp, if (ret < 0) goto reset_caps; - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[ENCODER:%d:%s] LTTPR common capabilities: %*ph\n", - encoder->base.base.id, encoder->base.name, - (int)sizeof(intel_dp->lttpr_common_caps), - intel_dp->lttpr_common_caps); + lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n", + (int)sizeof(intel_dp->lttpr_common_caps), + intel_dp->lttpr_common_caps); /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */ if (intel_dp->lttpr_common_caps[0] < 0x14) @@ -105,8 +116,6 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); int lttpr_count; int i; @@ -138,9 +147,8 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI return 0; if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) { - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n", - encoder->base.base.id, encoder->base.name); + lt_dbg(intel_dp, DP_PHY_DPRX, + "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n"); intel_dp_set_lttpr_transparent_mode(intel_dp, true); intel_dp_reset_lttpr_count(intel_dp); @@ -409,26 +417,22 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); int lane; if (intel_dp_is_uhbr(crtc_state)) { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " - "TX FFE request: " 
TRAIN_REQ_FMT "\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - crtc_state->lane_count, - TRAIN_REQ_TX_FFE_ARGS(link_status)); + lt_dbg(intel_dp, dp_phy, + "128b/132b, lanes: %d, " + "TX FFE request: " TRAIN_REQ_FMT "\n", + crtc_state->lane_count, + TRAIN_REQ_TX_FFE_ARGS(link_status)); } else { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, " - "vswing request: " TRAIN_REQ_FMT ", " - "pre-emphasis request: " TRAIN_REQ_FMT "\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - crtc_state->lane_count, - TRAIN_REQ_VSWING_ARGS(link_status), - TRAIN_REQ_PREEMPH_ARGS(link_status)); + lt_dbg(intel_dp, dp_phy, + "8b/10b, lanes: %d, " + "vswing request: " TRAIN_REQ_FMT ", " + "pre-emphasis request: " TRAIN_REQ_FMT "\n", + crtc_state->lane_count, + TRAIN_REQ_VSWING_ARGS(link_status), + TRAIN_REQ_PREEMPH_ARGS(link_status)); } for (lane = 0; lane < 4; lane++) @@ -487,16 +491,11 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, u8 dp_train_pat) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat); if (train_pat != DP_TRAINING_PATTERN_DISABLE) - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - dp_training_pattern_name(train_pat)); + lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n", + dp_training_pattern_name(train_pat)); intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); } @@ -531,24 +530,21 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (intel_dp_is_uhbr(crtc_state)) { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " - "TX FFE presets: " TRAIN_SET_FMT "\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - crtc_state->lane_count, - TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); + lt_dbg(intel_dp, dp_phy, + "128b/132b, lanes: %d, " + "TX FFE presets: " TRAIN_SET_FMT "\n", + crtc_state->lane_count, + TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); } else { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, " - "vswing levels: " TRAIN_SET_FMT ", " - "pre-emphasis levels: " TRAIN_SET_FMT "\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - crtc_state->lane_count, - TRAIN_SET_VSWING_ARGS(intel_dp->train_set), - TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); + lt_dbg(intel_dp, dp_phy, + "8b/10b, lanes: %d, " + "vswing levels: " TRAIN_SET_FMT ", " + "pre-emphasis levels: " TRAIN_SET_FMT "\n", + crtc_state->lane_count, + TRAIN_SET_VSWING_ARGS(intel_dp->train_set), + TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); } if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) @@ -637,6 +633,38 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp, return true; } +static void +intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + u8 link_config[2]; + + link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0; + link_config[1] = intel_dp_is_uhbr(crtc_state) ? 
+ DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B; + drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); +} + +static void +intel_dp_update_link_bw_set(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + u8 link_bw, u8 rate_select) +{ + u8 link_config[2]; + + /* Write the link configuration data */ + link_config[0] = link_bw; + link_config[1] = crtc_state->lane_count; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); + + /* eDP 1.4 rate select method. */ + if (!link_bw) + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, + &rate_select, 1); +} + /* * Prepare link training by configuring the link parameters. On DDI platforms * also enable the port here. @@ -645,9 +673,6 @@ static bool intel_dp_prepare_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - u8 link_config[2]; u8 link_bw, rate_select; if (intel_dp->prepare_link_retrain) @@ -668,41 +693,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, * link rates are not stable. */ if (!link_bw) { - struct intel_connector *connector = intel_dp->attached_connector; __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n", - connector->base.base.id, connector->base.name); + lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n"); drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates)); } if (link_bw) - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n", - encoder->base.base.id, encoder->base.name, link_bw); + lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n", + link_bw); else - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n", - encoder->base.base.id, encoder->base.name, rate_select); - - /* Write the link configuration data */ - link_config[0] = link_bw; - link_config[1] = crtc_state->lane_count; - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); - - /* eDP 1.4 rate select method. */ - if (!link_bw) - drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, - &rate_select, 1); - - link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0; - link_config[1] = intel_dp_is_uhbr(crtc_state) ? 
- DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B; - drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); + lt_dbg(intel_dp, DP_PHY_DPRX, + "Using LINK_RATE_SET value %02x\n", + rate_select); + /* + * Spec DP2.1 Section 3.5.2.16 + * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate + */ + intel_dp_update_downspread_ctrl(intel_dp, crtc_state); + intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw, + rate_select); return true; } @@ -737,15 +749,10 @@ void intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - link_status[0], link_status[1], link_status[2], - link_status[3], link_status[4], link_status[5]); + lt_dbg(intel_dp, dp_phy, + "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", + link_status[0], link_status[1], link_status[2], + link_status[3], link_status[4], link_status[5]); } /* @@ -757,8 +764,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 old_link_status[DP_LINK_STATUS_SIZE] = {}; int voltage_tries, cr_tries, max_cr_tries; u8 link_status[DP_LINK_STATUS_SIZE]; @@ -773,9 +778,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) { - drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_err(intel_dp, dp_phy, "Failed to enable link training\n"); return false; } @@ -798,35 +801,24 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { - drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_err(intel_dp, dp_phy, "Failed to get link status\n"); return false; } if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] Clock recovery OK\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n"); return true; } if (voltage_tries == 5) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n"); return false; } if (max_vswing_reached) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n"); return false; } @@ -834,10 +826,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, 
link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s][%s] Failed to update link training\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_err(intel_dp, dp_phy, "Failed to update link training\n"); return false; } @@ -853,10 +842,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, } intel_dp_dump_link_status(intel_dp, dp_phy, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), max_cr_tries); + lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n", + max_cr_tries); return false; } @@ -890,11 +877,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, return DP_TRAINING_PATTERN_4; } else if (crtc_state->port_clock == 810000) { if (!source_tps4) - drm_dbg_kms(&i915->drm, - "8.1 Gbps link rate without source TPS4 support\n"); + lt_dbg(intel_dp, dp_phy, + "8.1 Gbps link rate without source TPS4 support\n"); if (!sink_tps4) - drm_dbg_kms(&i915->drm, - "8.1 Gbps link rate without sink TPS4 support\n"); + lt_dbg(intel_dp, dp_phy, + "8.1 Gbps link rate without sink TPS4 support\n"); } /* @@ -908,11 +895,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, return DP_TRAINING_PATTERN_3; } else if (crtc_state->port_clock >= 540000) { if (!source_tps3) - drm_dbg_kms(&i915->drm, - ">=5.4/6.48 Gbps link rate without source TPS3 support\n"); + lt_dbg(intel_dp, dp_phy, + ">=5.4/6.48 Gbps link rate without source TPS3 support\n"); if (!sink_tps3) - drm_dbg_kms(&i915->drm, - ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + lt_dbg(intel_dp, dp_phy, + ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); } return DP_TRAINING_PATTERN_2; @@ -928,8 +915,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); int tries; u32 training_pattern; u8 link_status[DP_LINK_STATUS_SIZE]; @@ -948,10 +933,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, training_pattern)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s][%s] Failed to start channel equalization\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n"); return false; } @@ -960,10 +942,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { - drm_err(&i915->drm, - "[ENCODER:%d:%s][%s] Failed to get link status\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_err(intel_dp, dp_phy, "Failed to get link status\n"); break; } @@ -971,21 +950,15 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, if (!drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot " - "continue channel equalization\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_dbg(intel_dp, dp_phy, + "Clock recovery check failed, cannot continue channel 
equalization\n"); break; } if (drm_dp_channel_eq_ok(link_status, crtc_state->lane_count)) { channel_eq = true; - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n"); break; } @@ -993,10 +966,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s][%s] Failed to update link training\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_err(intel_dp, dp_phy, "Failed to update link training\n"); break; } } @@ -1004,10 +974,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, /* Try 5 times, else fail and try at lower BW */ if (tries == 5) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n", - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy)); + lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n"); } return channel_eq; @@ -1026,13 +993,12 @@ static int intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 sink_status; int ret; ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status); if (ret != 1) { - drm_dbg_kms(&i915->drm, "Failed to read sink status\n"); + lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n"); return ret < 0 ? ret : -EIO; } @@ -1058,9 +1024,6 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, void intel_dp_stop_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - intel_dp->link_trained = true; intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); @@ -1069,9 +1032,7 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp, if (intel_dp_is_uhbr(crtc_state) && wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] 128b/132b intra-hop not clearing\n", - encoder->base.base.id, encoder->base.name); + lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n"); } } @@ -1080,8 +1041,6 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct intel_connector *connector = intel_dp->attached_connector; - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool ret = false; if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) @@ -1093,13 +1052,10 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp, ret = true; out: - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n", - connector->base.base.id, connector->base.name, - encoder->base.base.id, encoder->base.name, - drm_dp_phy_name(dp_phy), - ret ? "passed" : "failed", - crtc_state->port_clock, crtc_state->lane_count); + lt_dbg(intel_dp, dp_phy, + "Link Training %s at link rate = %d, lane count = %d\n", + ret ? 
"passed" : "failed", + crtc_state->port_clock, crtc_state->lane_count); return ret; } @@ -1108,13 +1064,15 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_connector *intel_connector = intel_dp->attached_connector; - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) { + lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n"); + return; + } if (intel_dp->hobl_active) { - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[ENCODER:%d:%s] Link Training failed with HOBL active, " - "not enabling it from now on", - encoder->base.base.id, encoder->base.name); + lt_dbg(intel_dp, DP_PHY_DPRX, + "Link Training failed with HOBL active, not enabling it from now on\n"); intel_dp->hobl_failed = true; } else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state->port_clock, @@ -1161,8 +1119,6 @@ static bool intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 link_status[DP_LINK_STATUS_SIZE]; int delay_us; int try, max_tries = 20; @@ -1177,9 +1133,7 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, */ if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_1)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to start 128b/132b TPS1\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n"); return false; } @@ -1187,27 +1141,21 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, /* Read the initial TX FFE settings. */ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to read TX FFE presets\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n"); return false; } /* Update signal levels and training set as requested. */ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to set initial TX FFE settings\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n"); return false; } /* Start transmitting 128b/132b TPS2. 
*/ if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_2)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to start 128b/132b TPS2\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n"); return false; } @@ -1224,32 +1172,25 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux); if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to read link status\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n"); return false; } if (drm_dp_128b132b_link_training_failed(link_status)) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s] Downstream link training failure\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, + "Downstream link training failure\n"); return false; } if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) { - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] Lane channel eq done\n", - encoder->base.base.id, encoder->base.name); + lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n"); break; } if (timeout) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s] Lane channel eq timeout\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n"); return false; } @@ -1259,18 +1200,14 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, /* Update signal levels and training set as requested. */ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to update TX FFE settings\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n"); return false; } } if (try == max_tries) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s] Max loop count reached\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n"); return false; } @@ -1279,32 +1216,24 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, timeout = true; /* try one last time after deadline */ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to read link status\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n"); return false; } if (drm_dp_128b132b_link_training_failed(link_status)) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s] Downstream link training failure\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n"); return false; } if (drm_dp_128b132b_eq_interlane_align_done(link_status)) { - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] Interlane align done\n", - encoder->base.base.id, encoder->base.name); + lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n"); break; } if (timeout) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s] Interlane align timeout\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, 
"Interlane align timeout\n"); return false; } @@ -1322,16 +1251,12 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int lttpr_count) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 link_status[DP_LINK_STATUS_SIZE]; unsigned long deadline; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_2_CDS) != 1) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to start 128b/132b TPS2 CDS\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n"); return false; } @@ -1347,34 +1272,26 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp, usleep_range(2000, 3000); if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] Failed to read link status\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n"); return false; } if (drm_dp_128b132b_eq_interlane_align_done(link_status) && drm_dp_128b132b_cds_interlane_align_done(link_status) && drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) { - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] CDS interlane align done\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n"); break; } if (drm_dp_128b132b_link_training_failed(link_status)) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s] Downstream link training failure\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n"); return false; } if (timeout) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); - drm_err(&i915->drm, - "[ENCODER:%d:%s] CDS timeout\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n"); return false; } } @@ -1390,15 +1307,10 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int lttpr_count) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_connector *connector = intel_dp->attached_connector; - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool passed = false; if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { - drm_err(&i915->drm, - "[ENCODER:%d:%s] 128b/132b intra-hop not clear\n", - encoder->base.base.id, encoder->base.name); + lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n"); return false; } @@ -1406,12 +1318,10 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count)) passed = true; - drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s][ENCODER:%d:%s] 128b/132b Link Training %s at link rate = %d, lane count = %d\n", - connector->base.base.id, connector->base.name, - encoder->base.base.id, encoder->base.name, - passed ? "passed" : "failed", - crtc_state->port_clock, crtc_state->lane_count); + lt_dbg(intel_dp, DP_PHY_DPRX, + "128b/132b Link Training %s at link rate = %d, lane count = %d\n", + passed ? 
"passed" : "failed", + crtc_state->port_clock, crtc_state->lane_count); return passed; } @@ -1430,8 +1340,6 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_connector *connector = intel_dp->attached_connector; - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool passed; /* @@ -1464,10 +1372,7 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, * ignore_long_hpd flag can unset from the testcase. */ if (!passed && i915->display.hotplug.ignore_long_hpd) { - drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s][ENCODER:%d:%s] Ignore the link failure\n", - connector->base.base.id, connector->base.name, - encoder->base.base.id, encoder->base.name); + lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n"); return; } @@ -1478,8 +1383,6 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - /* * VIDEO_DIP_CTL register bit 31 should be set to '0' to not * disable SDP CRC. This is applicable for Display version 13. @@ -1492,5 +1395,5 @@ void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, DP_SDP_ERROR_DETECTION_CONFIGURATION, DP_SDP_CRC16_128B132B_EN); - drm_dbg_kms(&i915->drm, "DP2.0 SDP CRC16 for 128b/132b enabled\n"); + lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 2c49d9ab86a2..e3f176a093d2 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -298,7 +298,7 @@ static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state) to_intel_connector(conn_state->connector); if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - return connector->port->has_audio; + return connector->base.display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } @@ -318,6 +318,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; @@ -800,9 +801,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, /* Enable hdcp if it's desired */ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) - intel_hdcp_enable(to_intel_connector(conn_state->connector), - pipe_config, - (u8)conn_state->hdcp_content_type); + intel_hdcp_enable(state, encoder, pipe_config, conn_state); } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, @@ -837,15 +836,17 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - struct edid *edid; + const struct drm_edid *drm_edid; int ret; if (drm_connector_is_unregistered(connector)) return intel_connector_update_modes(connector, NULL); - edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); - ret = intel_connector_update_modes(connector, edid); - kfree(edid); + drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port); + + ret = intel_connector_update_modes(connector, 
drm_edid); + + drm_edid_free(drm_edid); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 4e9c18be7e1f..824be7f03724 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -8,6 +8,7 @@ #include "i915_reg.h" #include "intel_crtc.h" +#include "intel_cx0_phy.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_types.h" @@ -995,6 +996,32 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state, return 0; } +static int mtl_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_encoder *encoder = + intel_get_crtc_new_encoder(state, crtc_state); + enum phy phy = intel_port_to_phy(i915, encoder->port); + int ret; + + ret = intel_cx0pll_calc_state(crtc_state, encoder); + if (ret) + return ret; + + /* TODO: Do the readback via intel_compute_shared_dplls() */ + if (intel_is_c10phy(i915, phy)) + crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); + else + crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); + + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + + return 0; +} + static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) { return dpll->m < factor * dpll->n; @@ -1423,6 +1450,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, return 0; } +static const struct intel_dpll_funcs mtl_dpll_funcs = { + .crtc_compute_clock = mtl_crtc_compute_clock, +}; + static const struct intel_dpll_funcs dg2_dpll_funcs = { .crtc_compute_clock = dg2_crtc_compute_clock, }; @@ -1517,7 +1548,9 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { - if (IS_DG2(dev_priv)) + if (DISPLAY_VER(dev_priv) >= 14) + dev_priv->display.funcs.dpll = &mtl_dpll_funcs; + else if (IS_DG2(dev_priv)) dev_priv->display.funcs.dpll = &dg2_dpll_funcs; else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) dev_priv->display.funcs.dpll = &hsw_dpll_funcs; @@ -2047,7 +2080,7 @@ static void assert_pll(struct drm_i915_private *dev_priv, bool cur_state; cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE; - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(dev_priv, cur_state != state, "PLL state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 22fc908b7e5d..6b2d8a1e2aa9 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -169,8 +169,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, return; cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state); - I915_STATE_WARN(cur_state != state, - "%s assertion failure (expected %s, current %s)\n", + I915_STATE_WARN(dev_priv, cur_state != state, + "%s assertion failure (expected %s, current %s)\n", pll->info->name, str_on_off(state), str_on_off(cur_state)); } @@ -351,13 +351,35 @@ intel_find_shared_dpll(struct intel_atomic_state *state, return NULL; } +/** + * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC + * @crtc: CRTC on which behalf the reference is taken 
+ * @pll: DPLL for which the reference is taken + * @shared_dpll_state: the DPLL atomic state in which the reference is tracked + * + * Take a reference for @pll tracking the use of it by @crtc. + */ +static void +intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + struct intel_shared_dpll_state *shared_dpll_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0); + + shared_dpll_state->pipe_mask |= BIT(crtc->pipe); + + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n", + crtc->base.base.id, crtc->base.name, pll->info->name); +} + static void intel_reference_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_shared_dpll_state *shared_dpll; const enum intel_dpll_id id = pll->info->id; @@ -366,11 +388,29 @@ intel_reference_shared_dpll(struct intel_atomic_state *state, if (shared_dpll[id].pipe_mask == 0) shared_dpll[id].hw_state = *pll_state; - drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0); + intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); +} + +/** + * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC + * @crtc: CRTC on which behalf the reference is dropped + * @pll: DPLL for which the reference is dropped + * @shared_dpll_state: the DPLL atomic state in which the reference is tracked + * + * Drop a reference for @pll tracking the end of use of it by @crtc. + */ +void +intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + struct intel_shared_dpll_state *shared_dpll_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0); - shared_dpll[id].pipe_mask |= BIT(crtc->pipe); + shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe); - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n", + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); } @@ -378,18 +418,12 @@ static void intel_unreference_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_shared_dpll *pll) { - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_shared_dpll_state *shared_dpll; const enum intel_dpll_id id = pll->info->id; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0); - - shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe); - - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n", - crtc->base.base.id, crtc->base.name, pll->info->name); + intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); } static void intel_put_dpll(struct intel_atomic_state *state, @@ -464,12 +498,11 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) u32 val; bool enabled; - I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); - val = intel_de_read(dev_priv, PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); - I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); + I915_STATE_WARN(dev_priv, !enabled, + "PCH refclk assertion failure, should be 
active but is disabled\n"); } static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, @@ -4104,7 +4137,7 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->display.dpll.lock); - if (IS_DG2(dev_priv)) + if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv)) /* No shared DPLLs on DG2; port PLLs are part of the PHY */ dpll_mgr = NULL; else if (IS_ALDERLAKE_P(dev_priv)) @@ -4314,7 +4347,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, to_intel_crtc_state(crtc->base.state); if (crtc_state->hw.active && crtc_state->shared_dpll == pll) - pll->state.pipe_mask |= BIT(crtc->pipe); + intel_reference_shared_dpll_crtc(crtc, pll, &pll->state); } pll->active_mask = pll->state.pipe_mask; @@ -4407,17 +4440,18 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state); if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { - I915_STATE_WARN(!pll->on && pll->active_mask, + I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask, "pll in active use but not on in sw tracking\n"); - I915_STATE_WARN(pll->on && !pll->active_mask, + I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask, "pll is on but not used by any active pipe\n"); - I915_STATE_WARN(pll->on != active, + I915_STATE_WARN(dev_priv, pll->on != active, "pll on state mismatch (expected %i, found %i)\n", pll->on, active); } if (!crtc) { - I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask, + I915_STATE_WARN(dev_priv, + pll->active_mask & ~pll->state.pipe_mask, "more active pll users than references: 0x%x vs 0x%x\n", pll->active_mask, pll->state.pipe_mask); @@ -4427,20 +4461,20 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, pipe_mask = BIT(crtc->pipe); if (new_crtc_state->hw.active) - I915_STATE_WARN(!(pll->active_mask & pipe_mask), + I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask), "pll active mismatch (expected pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); else - I915_STATE_WARN(pll->active_mask & pipe_mask, + I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask), + I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", pipe_mask, pll->state.pipe_mask); - I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, - &dpll_hw_state, + I915_STATE_WARN(dev_priv, + pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, sizeof(dpll_hw_state)), "pll hw state mismatch\n"); } @@ -4460,10 +4494,10 @@ void intel_shared_dpll_state_verify(struct intel_crtc *crtc, u8 pipe_mask = BIT(crtc->pipe); struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; - I915_STATE_WARN(pll->active_mask & pipe_mask, + I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask, "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(pll->state.pipe_mask & pipe_mask, + I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask, "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n", pipe_name(crtc->pipe), pll->state.pipe_mask); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 3854f1b4299a..ba62eb5d7c51 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -341,6 
+341,9 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder); void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc); +void intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + struct intel_shared_dpll_state *shared_dpll_state); void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, enum icl_port_dpll_id port_dpll_id); void intel_update_active_dpll(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index eb2dcd866cc8..9884678743b6 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -271,6 +271,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return 0; diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index c004f08fcfe1..0d27a98dcbbe 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -157,6 +157,32 @@ struct intel_modifier_desc { static const struct intel_modifier_desc intel_modifiers[] = { { + .modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS, + .display_ver = { 14, 14 }, + .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC, + + .ccs.packed_aux_planes = BIT(1), + .ccs.planar_aux_planes = BIT(2) | BIT(3), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS, + .display_ver = { 14, 14 }, + .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC, + .display_ver = { 14, 14 }, + .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC_CC, + + .ccs.cc_planes = BIT(2), + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_cc_formats), + }, { .modifier = I915_FORMAT_MOD_4_TILED_DG2_MC_CCS, .display_ver = { 13, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC, @@ -370,6 +396,14 @@ static bool plane_has_modifier(struct drm_i915_private *i915, if (!plane_caps_contain_all(plane_caps, md->plane_caps)) return false; + /* + * Separate AuxCCS and Flat CCS modifiers to be run only on platforms + * where supported. 
+ */ + if (intel_fb_is_ccs_modifier(md->modifier) && + HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes) + return false; + return true; } @@ -489,7 +523,7 @@ static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, in { const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); - return check_modifier_display_ver_range(md, 12, 13) && + return check_modifier_display_ver_range(md, 12, 14) && ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); } @@ -605,6 +639,9 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 128; fallthrough; + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS: + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC: + case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: @@ -791,6 +828,9 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: + case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS: + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC: return 16 * 1024; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 1aca7552a85d..fffd568070d4 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -243,7 +243,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state) struct i915_vma *vma; bool phys_cursor = plane->id == PLANE_CURSOR && - INTEL_INFO(dev_priv)->display.cursor_needs_physical; + DISPLAY_INFO(dev_priv)->cursor_needs_physical; if (!intel_fb_uses_dpt(fb)) { vma = intel_pin_and_fence_fb_obj(fb, phys_cursor, diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index b507ff944864..1966f9396201 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -44,6 +44,7 @@ #include <drm/drm_fourcc.h> #include "i915_drv.h" +#include "i915_reg.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_cdclk.h" @@ -55,7 +56,7 @@ #define for_each_fbc_id(__dev_priv, __fbc_id) \ for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ - for_each_if(RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id)) + for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id)) #define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ for_each_fbc_id((__dev_priv), (__fbc_id)) \ @@ -1707,10 +1708,10 @@ void intel_fbc_init(struct drm_i915_private *i915) enum intel_fbc_id fbc_id; if (!drm_mm_initialized(&i915->mm.stolen)) - RUNTIME_INFO(i915)->fbc_mask = 0; + DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0; if (need_fbc_vtd_wa(i915)) - RUNTIME_INFO(i915)->fbc_mask = 0; + DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0; i915->params.enable_fbc = intel_sanitize_fbc_option(i915); drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index eccaceaf8b9d..3b5690acd720 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -135,6 +135,9 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) return i915_gem_fb_mmap(obj, vma); } +__diag_push(); 
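For context on the warning-suppression pair this hunk introduces around intelfb_ops (the __diag_ignore_all() and __diag_pop() lines follow below): the struct is built from the __FB_DEFAULT_DEFERRED_OPS_* helper macros and then re-initializes some of the same members, which W=1 builds flag with -Woverride-init. Below is a minimal, self-contained sketch of the same idiom, assuming a kernel build environment where __diag_push()/__diag_ignore_all()/__diag_pop() (linux/compiler_types.h) are available; struct demo_ops, DEMO_DEFAULT_OPS and the demo_* functions are hypothetical stand-ins, not anything from this patch.

/* Illustrative only: all demo_* names below are made up for this sketch. */
struct demo_ops {
	int (*open)(void);
	int (*close)(void);
};

static int demo_default_open(void)  { return 0; }
static int demo_default_close(void) { return 0; }
static int demo_custom_close(void)  { return 0; }

/* Plays the role of __FB_DEFAULT_DEFERRED_OPS_*: fills in default ops. */
#define DEMO_DEFAULT_OPS \
	.open  = demo_default_open, \
	.close = demo_default_close

__diag_push();
__diag_ignore_all("-Woverride-init", "intentionally overriding a default op");

static const struct demo_ops demo_ops = {
	DEMO_DEFAULT_OPS,		/* expands to the default initializers */
	.close = demo_custom_close,	/* re-initializes .close; W=1 would warn without the pragma */
};

__diag_pop();

The push/pop pair keeps the suppression scoped to this one definition rather than disabling -Woverride-init for the whole translation unit, which is the same trade-off the patch makes for intelfb_ops.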
+__diag_ignore_all("-Woverride-init", "Allow overriding the default ops"); + static const struct fb_ops intelfb_ops = { .owner = THIS_MODULE, __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev), @@ -146,6 +149,8 @@ static const struct fb_ops intelfb_ops = { .fb_mmap = intel_fbdev_mmap, }; +__diag_pop(); + static int intelfb_alloc(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 55283677c45a..e12b46a84fa1 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -36,7 +36,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, } else { cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; } - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(dev_priv, cur_state != state, "FDI TX state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } @@ -57,7 +57,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, bool cur_state; cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(dev_priv, cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } @@ -86,7 +86,8 @@ void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, return; cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; - I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n"); + I915_STATE_WARN(i915, !cur_state, + "FDI TX PLL assertion failure, should be active but is disabled\n"); } static void assert_fdi_rx_pll(struct drm_i915_private *i915, @@ -95,7 +96,7 @@ static void assert_fdi_rx_pll(struct drm_i915_private *i915, bool cur_state; cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(i915, cur_state != state, "FDI RX PLL assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index b708a62e509a..09a7fa6c0c37 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -26,7 +26,9 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 3ddfc8080ee8..e95ddb580ef6 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -991,3 +991,8 @@ void intel_gmbus_teardown(struct drm_i915_private *i915) i915->display.gmbus.bus[pin] = NULL; } } + +void intel_gmbus_irq_handler(struct drm_i915_private *i915) +{ + wake_up_all(&i915->display.gmbus.wait_queue); +} diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h index 20f704bd4e70..8111eb23e2af 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus.h @@ -46,4 +46,6 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter); void intel_gmbus_reset(struct drm_i915_private *dev_priv); +void 
intel_gmbus_irq_handler(struct drm_i915_private *i915); + #endif /* __INTEL_GMBUS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 650232c4892b..17542c28dfd5 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -30,7 +30,8 @@ #define KEY_LOAD_TRIES 5 #define HDCP2_LC_RETRY_CNT 3 -static int intel_conn_to_vcpi(struct intel_connector *connector) +static int intel_conn_to_vcpi(struct drm_atomic_state *state, + struct intel_connector *connector) { struct drm_dp_mst_topology_mgr *mgr; struct drm_dp_mst_atomic_payload *payload; @@ -42,7 +43,7 @@ static int intel_conn_to_vcpi(struct intel_connector *connector) return 0; mgr = connector->port->mgr; - drm_modeset_lock(&mgr->base.lock, NULL); + drm_modeset_lock(&mgr->base.lock, state->acquire_ctx); mst_state = to_drm_dp_mst_topology_state(mgr->base.state); payload = drm_atomic_get_mst_payload_state(mst_state, connector->port); if (drm_WARN_ON(mgr->dev, !payload)) @@ -54,7 +55,6 @@ static int intel_conn_to_vcpi(struct intel_connector *connector) goto out; } out: - drm_modeset_unlock(&mgr->base.lock); return vcpi; } @@ -68,48 +68,18 @@ out: * DP MST topology. Though it is not compulsory, security fw should change its * policy to mark different content_types for different streams. */ -static int +static void intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) { - struct drm_connector_list_iter conn_iter; - struct intel_digital_port *conn_dig_port; - struct intel_connector *connector; - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; bool enforce_type0 = false; int k; - data->k = 0; - if (dig_port->hdcp_auth_status) - return 0; - - drm_connector_list_iter_begin(&i915->drm, &conn_iter); - for_each_intel_connector_iter(connector, &conn_iter) { - if (connector->base.status == connector_status_disconnected) - continue; - - if (!intel_encoder_is_mst(intel_attached_encoder(connector))) - continue; - - conn_dig_port = intel_attached_dig_port(connector); - if (conn_dig_port != dig_port) - continue; - - if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable) - enforce_type0 = true; - - data->streams[data->k].stream_id = intel_conn_to_vcpi(connector); - data->k++; - - /* if there is only one active stream */ - if (dig_port->dp.active_mst_links <= 1) - break; - } - drm_connector_list_iter_end(&conn_iter); + return; - if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) - return -EINVAL; + if (!dig_port->hdcp_mst_type1_capable) + enforce_type0 = true; /* * Apply common protection level across all streams in DP MST Topology. @@ -118,27 +88,19 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) for (k = 0; k < data->k; k++) data->streams[k].stream_type = enforce_type0 ? 
DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1; - - return 0; } -static int intel_hdcp_prepare_streams(struct intel_connector *connector) +static void intel_hdcp_prepare_streams(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; - int ret; if (!intel_encoder_is_mst(intel_attached_encoder(connector))) { - data->k = 1; data->streams[0].stream_type = hdcp->content_type; } else { - ret = intel_hdcp_required_content_stream(dig_port); - if (ret) - return ret; + intel_hdcp_required_content_stream(dig_port); } - - return 0; } static @@ -202,10 +164,8 @@ bool intel_hdcp_capable(struct intel_connector *connector) bool intel_hdcp2_capable(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct intel_gt *gt = dev_priv->media_gt; - struct intel_gsc_uc *gsc = >->uc.gsc; bool capable = false; /* I915 support for HDCP2.2 */ @@ -213,17 +173,21 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return false; /* If MTL+ make sure gsc is loaded and proxy is setup */ - if (intel_hdcp_gsc_cs_required(dev_priv)) - if (!intel_uc_fw_is_running(&gsc->fw)) + if (intel_hdcp_gsc_cs_required(i915)) { + struct intel_gt *gt = i915->media_gt; + struct intel_gsc_uc *gsc = gt ? >->uc.gsc : NULL; + + if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) return false; + } /* MEI/GSC interface is solid depending on which is used */ - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_lock(&i915->display.hdcp.hdcp_mutex); + if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) { + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return false; } - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); /* Sink's capability for HDCP2.2 */ hdcp->shim->hdcp_2_2_capable(dig_port, &capable); @@ -231,20 +195,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return capable; } -static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv, +static bool intel_hdcp_in_use(struct drm_i915_private *i915, enum transcoder cpu_transcoder, enum port port) { - return intel_de_read(dev_priv, - HDCP_STATUS(dev_priv, cpu_transcoder, port)) & - HDCP_STATUS_ENC; + return intel_de_read(i915, + HDCP_STATUS(i915, cpu_transcoder, port)) & + HDCP_STATUS_ENC; } -static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, +static bool intel_hdcp2_in_use(struct drm_i915_private *i915, enum transcoder cpu_transcoder, enum port port) { - return intel_de_read(dev_priv, - HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & - LINK_ENCRYPTION_STATUS; + return intel_de_read(i915, + HDCP2_STATUS(i915, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS; } static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, @@ -268,7 +232,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, return 0; } -static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) +static bool hdcp_key_loadable(struct drm_i915_private *i915) { enum i915_power_well_id id; intel_wakeref_t wakeref; @@ -278,14 +242,14 @@ static bool 
hdcp_key_loadable(struct drm_i915_private *dev_priv) * On HSW and BDW, Display HW loads the Key as soon as Display resumes. * On all BXT+, SW can load the keys only when the PW#1 is turned on. */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) id = HSW_DISP_PW_GLOBAL; else id = SKL_DISP_PW_1; /* PG1 (power well #1) needs to be enabled */ - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - enabled = intel_display_power_well_is_enabled(dev_priv, id); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + enabled = intel_display_power_well_is_enabled(i915, id); /* * Another req for hdcp key loadability is enabled state of pll for @@ -296,19 +260,19 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) return enabled; } -static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv) +static void intel_hdcp_clear_keys(struct drm_i915_private *i915) { - intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); - intel_de_write(dev_priv, HDCP_KEY_STATUS, + intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); + intel_de_write(i915, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); } -static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) +static int intel_hdcp_load_keys(struct drm_i915_private *i915) { int ret; u32 val; - val = intel_de_read(dev_priv, HDCP_KEY_STATUS); + val = intel_de_read(i915, HDCP_KEY_STATUS); if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) return 0; @@ -316,8 +280,8 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) * On HSW and BDW HW loads the HDCP1.4 Key when Display comes * out of reset. So if Key is not already loaded, its an error state. */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) return -ENXIO; /* @@ -328,20 +292,20 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) * process from other platforms. These platforms use the GT Driver * Mailbox interface. 
*/ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { - ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); + if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) { + ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { - drm_err(&dev_priv->drm, + drm_err(&i915->drm, "Failed to initiate HDCP key load (%d)\n", ret); return ret; } } else { - intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); + intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); } /* Wait for the keys to load (500us) */ - ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS, + ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 10, 1, &val); if (ret) @@ -350,27 +314,27 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) return -ENXIO; /* Send Aksv over to PCH display for use in authentication */ - intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); + intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); return 0; } /* Returns updated SHA-1 index */ -static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) +static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text) { - intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text); - if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { - drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n"); + intel_de_write(i915, HDCP_SHA_TEXT, sha_text); + if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { + drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; } return 0; } static -u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, +u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, enum transcoder cpu_transcoder, enum port port) { - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(i915) >= 12) { switch (cpu_transcoder) { case TRANSCODER_A: return HDCP_TRANSA_REP_PRESENT | @@ -385,7 +349,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, return HDCP_TRANSD_REP_PRESENT | HDCP_TRANSD_SHA1_M0; default: - drm_err(&dev_priv->drm, "Unknown transcoder %d\n", + drm_err(&i915->drm, "Unknown transcoder %d\n", cpu_transcoder); return -EINVAL; } @@ -403,7 +367,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, case PORT_E: return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: - drm_err(&dev_priv->drm, "Unknown port %d\n", port); + drm_err(&i915->drm, "Unknown port %d\n", port); return -EINVAL; } } @@ -414,7 +378,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port = dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; @@ -425,7 +389,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, ret = shim->read_v_prime_part(dig_port, i, &vprime); if (ret) return ret; - intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime); + intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime); } /* @@ -441,8 +405,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_idx = 0; sha_text = 0; sha_leftovers = 0; - rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port); - 
intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); for (i = 0; i < num_downstream; i++) { unsigned int sha_empty; u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; @@ -454,14 +418,14 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_text |= ksv[j] << off; } - ret = intel_write_sha_text(dev_priv, sha_text); + ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; /* Programming guide writes this every 64 bytes */ sha_idx += sizeof(sha_text); if (!(sha_idx % 64)) - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Store the leftover bytes from the ksv in sha_text */ @@ -478,7 +442,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, if (sizeof(sha_text) > sha_leftovers) continue; - ret = intel_write_sha_text(dev_priv, sha_text); + ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_leftovers = 0; @@ -494,73 +458,73 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, */ if (sha_leftovers == 0) { /* Write 16 bits of text, 16 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); - ret = intel_write_sha_text(dev_priv, + ret = intel_write_sha_text(i915, bstatus[0] << 8 | bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 16 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 1) { /* Write 24 bits of text, 8 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); sha_text |= bstatus[0] << 16 | bstatus[1] << 8; /* Only 24-bits of data, must be in the LSB */ sha_text = (sha_text & 0xffffff00) >> 8; - ret = intel_write_sha_text(dev_priv, sha_text); + ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 24 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 2) { /* Write 32 bits of text */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0] << 8 | bstatus[1]; - ret = intel_write_sha_text(dev_priv, sha_text); + ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 64 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | 
HDCP_SHA1_TEXT_0); for (i = 0; i < 2; i++) { - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); @@ -570,56 +534,56 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, * Terminate the SHA-1 stream by hand. For the other leftover * cases this is appended by the hardware. */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; - ret = intel_write_sha_text(dev_priv, sha_text); + ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 3) { /* Write 32 bits of text (filled from LSB) */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0]; - ret = intel_write_sha_text(dev_priv, sha_text); + ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); - ret = intel_write_sha_text(dev_priv, bstatus[1]); + ret = intel_write_sha_text(i915, bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of M0 */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else { - drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n", + drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n", sha_leftovers); return -EINVAL; } - intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ while ((sha_idx % 64) < (64 - sizeof(sha_text))) { - ret = intel_write_sha_text(dev_priv, 0); + ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); @@ -631,20 +595,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, * - 10 bytes for BINFO/BSTATUS(2), M0(8) */ sha_text = (num_downstream * 5 + 10) * 8; - ret = intel_write_sha_text(dev_priv, sha_text); + ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; /* Tell the HW we're done with the hash and wait for it to ACK */ - intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); - if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, + if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, 1)) { - drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n"); + drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n"); return -ETIMEDOUT; } - if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { - drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n"); + if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { + drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n"); return -ENXIO; } @@ -656,14 +620,14 @@ static int 
intel_hdcp_auth_downstream(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct intel_hdcp_shim *shim = connector->hdcp.shim; u8 bstatus[2], num_downstream, *ksv_fifo; int ret, i, tries = 3; ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "KSV list failed to become ready (%d)\n", ret); return ret; } @@ -674,7 +638,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { - drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n"); + drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n"); return -EPERM; } @@ -687,14 +651,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) */ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); if (num_downstream == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Repeater with zero downstream devices\n"); return -EINVAL; } ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); if (!ksv_fifo) { - drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n"); + drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n"); return -ENOMEM; } @@ -702,9 +666,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (ret) goto err; - if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo, + if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo, num_downstream) > 0) { - drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n"); + drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n"); ret = -EPERM; goto err; } @@ -722,12 +686,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) } if (i == tries) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "V Prime validation failed.(%d)\n", ret); goto err; } - drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n", + drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n", num_downstream); ret = 0; err: @@ -739,7 +703,7 @@ err: static int intel_hdcp_auth(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; @@ -771,7 +735,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret) return ret; if (!hdcp_capable) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Panel is not HDCP capable\n"); return -EINVAL; } @@ -779,24 +743,24 @@ static int intel_hdcp_auth(struct intel_connector *connector) /* Initialize An with 2 random values and acquire it */ for (i = 0; i < 2; i++) - intel_de_write(dev_priv, - HDCP_ANINIT(dev_priv, cpu_transcoder, port), + intel_de_write(i915, + HDCP_ANINIT(i915, cpu_transcoder, port), get_random_u32()); - intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), + intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_de_wait_for_set(dev_priv, - HDCP_STATUS(dev_priv, cpu_transcoder, port), + if (intel_de_wait_for_set(i915, + HDCP_STATUS(i915, cpu_transcoder, port), HDCP_STATUS_AN_READY, 1)) { 
- drm_err(&dev_priv->drm, "Timed out waiting for An\n"); + drm_err(&i915->drm, "Timed out waiting for An\n"); return -ETIMEDOUT; } - an.reg[0] = intel_de_read(dev_priv, - HDCP_ANLO(dev_priv, cpu_transcoder, port)); - an.reg[1] = intel_de_read(dev_priv, - HDCP_ANHI(dev_priv, cpu_transcoder, port)); + an.reg[0] = intel_de_read(i915, + HDCP_ANLO(i915, cpu_transcoder, port)); + an.reg[1] = intel_de_read(i915, + HDCP_ANHI(i915, cpu_transcoder, port)); ret = shim->write_an_aksv(dig_port, an.shim); if (ret) return ret; @@ -809,34 +773,34 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret < 0) return ret; - if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) { - drm_err(&dev_priv->drm, "BKSV is revoked\n"); + if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) { + drm_err(&i915->drm, "BKSV is revoked\n"); return -EPERM; } - intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port), + intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port), bksv.reg[0]); - intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port), + intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port), bksv.reg[1]); ret = shim->repeater_present(dig_port, &repeater_present); if (ret) return ret; if (repeater_present) - intel_de_write(dev_priv, HDCP_REP_CTL, - intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port)); + intel_de_write(i915, HDCP_REP_CTL, + intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port)); ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) return ret; - intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), + intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ - if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { - drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n"); + drm_err(&i915->drm, "Timed out waiting for R0 ready\n"); return -ETIMEDOUT; } @@ -862,30 +826,30 @@ static int intel_hdcp_auth(struct intel_connector *connector) ret = shim->read_ri_prime(dig_port, ri.shim); if (ret) return ret; - intel_de_write(dev_priv, - HDCP_RPRIME(dev_priv, cpu_transcoder, port), + intel_de_write(i915, + HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) break; } if (i == tries) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Timed out waiting for Ri prime match (%x)\n", - intel_de_read(dev_priv, HDCP_STATUS(dev_priv, - cpu_transcoder, port))); + intel_de_read(i915, + HDCP_STATUS(i915, cpu_transcoder, port))); return -ETIMEDOUT; } /* Wait for encryption confirmation */ - if (intel_de_wait_for_set(dev_priv, - HDCP_STATUS(dev_priv, cpu_transcoder, port), + if (intel_de_wait_for_set(i915, + HDCP_STATUS(i915, cpu_transcoder, port), HDCP_STATUS_ENC, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&dev_priv->drm, "Timed out waiting for encryption\n"); + drm_err(&i915->drm, "Timed out waiting for encryption\n"); return -ETIMEDOUT; } @@ -893,42 +857,42 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (shim->stream_encryption) { ret = shim->stream_encryption(connector, true); if (ret) { - drm_err(&dev_priv->drm, "[%s:%d] 
Failed to enable HDCP 1.4 stream enc\n", + drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n", connector->base.name, connector->base.base.id); return ret; } - drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", + drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", transcoder_name(hdcp->stream_transcoder)); } if (repeater_present) return intel_hdcp_auth_downstream(connector); - drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n"); + drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n"); return 0; } static int _intel_hdcp_disable(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; u32 repeater_ctl; int ret; - drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n", + drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); if (hdcp->shim->stream_encryption) { ret = hdcp->shim->stream_encryption(connector, false); if (ret) { - drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n", + drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n", connector->base.name, connector->base.base.id); return ret; } - drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", + drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", transcoder_name(hdcp->stream_transcoder)); /* * If there are other connectors on this port using HDCP, @@ -940,51 +904,51 @@ static int _intel_hdcp_disable(struct intel_connector *connector) } hdcp->hdcp_encrypted = false; - intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0); - if (intel_de_wait_for_clear(dev_priv, - HDCP_STATUS(dev_priv, cpu_transcoder, port), + intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0); + if (intel_de_wait_for_clear(i915, + HDCP_STATUS(i915, cpu_transcoder, port), ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&dev_priv->drm, + drm_err(&i915->drm, "Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } - repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, + repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); - intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0); + intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0); ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { - drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n"); + drm_err(&i915->drm, "Failed to disable HDCP signalling\n"); return ret; } - drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n"); + drm_dbg_kms(&i915->drm, "HDCP is disabled\n"); return 0; } static int _intel_hdcp_enable(struct intel_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int i, ret, tries = 3; - drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n", + drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n", connector->base.name, connector->base.base.id); - if (!hdcp_key_loadable(dev_priv)) { - drm_err(&dev_priv->drm, "HDCP key Load is not possible\n"); + if 
(!hdcp_key_loadable(i915)) { + drm_err(&i915->drm, "HDCP key Load is not possible\n"); return -ENXIO; } for (i = 0; i < KEY_LOAD_TRIES; i++) { - ret = intel_hdcp_load_keys(dev_priv); + ret = intel_hdcp_load_keys(i915); if (!ret) break; - intel_hdcp_clear_keys(dev_priv); + intel_hdcp_clear_keys(i915); } if (ret) { - drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n", + drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n", ret); return ret; } @@ -997,13 +961,13 @@ static int _intel_hdcp_enable(struct intel_connector *connector) return 0; } - drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret); + drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret); /* Ensuring HDCP encryption and signalling are stopped. */ _intel_hdcp_disable(connector); } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } @@ -1045,7 +1009,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector, static int intel_hdcp_check_link(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; @@ -1063,12 +1027,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - if (drm_WARN_ON(&dev_priv->drm, - !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) { - drm_err(&dev_priv->drm, + if (drm_WARN_ON(&i915->drm, + !intel_hdcp_in_use(i915, cpu_transcoder, port))) { + drm_err(&i915->drm, "%s:%d HDCP link stopped encryption,%x\n", connector->base.name, connector->base.base.id, - intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port))); + intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); ret = -ENXIO; intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, @@ -1084,13 +1048,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "[%s:%d] HDCP link failed, retrying authentication\n", connector->base.name, connector->base.base.id); ret = _intel_hdcp_disable(connector); if (ret) { - drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret); + drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); @@ -1099,7 +1063,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector) ret = _intel_hdcp_enable(connector); if (ret) { - drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret); + drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); @@ -1117,9 +1081,9 @@ static void intel_hdcp_prop_work(struct work_struct *work) struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, prop_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); - drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL); + drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL); mutex_lock(&hdcp->mutex); /* @@ -1132,15 +1096,15 @@ static void intel_hdcp_prop_work(struct work_struct *work) hdcp->value); mutex_unlock(&hdcp->mutex); - 
drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); + drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); drm_connector_put(&connector->base); } -bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) +bool is_hdcp_supported(struct drm_i915_private *i915, enum port port) { - return RUNTIME_INFO(dev_priv)->has_hdcp && - (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E); + return DISPLAY_RUNTIME_INFO(i915)->has_hdcp && + (DISPLAY_VER(i915) >= 12 || port < PORT_E); } static int @@ -1149,23 +1113,23 @@ hdcp2_prepare_ake_init(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); if (ret) - drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n", + drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1179,15 +1143,15 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } @@ -1195,9 +1159,9 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, rx_cert, paired, ek_pub_km, msg_sz); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n", + drm_dbg_kms(&i915->drm, "Verify rx_cert failed. 
%d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1207,22 +1171,22 @@ static int hdcp2_verify_hprime(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1233,23 +1197,23 @@ hdcp2_store_pairing_info(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n", + drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1260,23 +1224,23 @@ hdcp2_prepare_lc_init(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n", + drm_dbg_kms(&i915->drm, "Prepare lc_init failed. 
%d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1287,23 +1251,23 @@ hdcp2_verify_lprime(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n", + drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1313,23 +1277,23 @@ static int hdcp2_prepare_skey(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n", + drm_dbg_kms(&i915->drm, "Get session key failed. %d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1342,15 +1306,15 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } @@ -1359,9 +1323,9 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, rep_topology, rep_send_ack); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Verify rep topology failed. 
%d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1372,22 +1336,22 @@ hdcp2_verify_mprime(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1396,23 +1360,23 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n", + drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. 
%d\n", ret); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1420,21 +1384,21 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) static int hdcp2_close_session(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_master *arbiter; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - arbiter = dev_priv->display.hdcp.master; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, &dig_port->hdcp_port_data); - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -1448,7 +1412,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector) static int hdcp2_authentication_key_exchange(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_ake_init ake_init; @@ -1480,16 +1444,16 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) return ret; if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { - drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n"); + drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n"); return -EINVAL; } hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); - if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, + if (drm_hdcp_check_ksvs_revoked(&i915->drm, msgs.send_cert.cert_rx.receiver_id, 1) > 0) { - drm_err(&dev_priv->drm, "Receiver ID is revoked\n"); + drm_err(&i915->drm, "Receiver ID is revoked\n"); return -EPERM; } @@ -1643,7 +1607,7 @@ static int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_send_receiverid_list recvid_list; @@ -1663,7 +1627,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { - drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n"); + drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n"); return -EINVAL; } @@ -1680,23 +1644,23 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); if (!hdcp->hdcp2_encrypted && seq_num_v) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Non zero Seq_num_v at first RecvId_List msg\n"); return -EINVAL; } if (seq_num_v < hdcp->seq_num_v) { /* Roll over of the seq_num_v from repeater. Reauthenticate. 
*/ - drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n"); + drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n"); return -EINVAL; } device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | HDCP_2_2_DEV_COUNT_LO(rx_info[1])); - if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, + if (drm_hdcp_check_ksvs_revoked(&i915->drm, msgs.recvid_list.receiver_ids, device_cnt) > 0) { - drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n"); + drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n"); return -EPERM; } @@ -1765,16 +1729,16 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) static int hdcp2_enable_stream_encryption(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; enum port port = dig_port->base.port; int ret = 0; - if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)) { - drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n", + drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n", connector->base.name, connector->base.base.id); ret = -EPERM; goto link_recover; @@ -1783,11 +1747,11 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, true); if (ret) { - drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n", + drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n", connector->base.name, connector->base.base.id); return ret; } - drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", + drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", transcoder_name(hdcp->stream_transcoder)); } @@ -1795,7 +1759,7 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) link_recover: if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n"); + drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); dig_port->hdcp_auth_status = false; data->k = 0; @@ -1806,34 +1770,34 @@ link_recover: static int hdcp2_enable_encryption(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + drm_WARN_ON(&i915->drm, + intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) { - drm_err(&dev_priv->drm, + drm_err(&i915->drm, "Failed to enable HDCP signalling. %d\n", ret); return ret; } } - if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_AUTH_STATUS) /* Link is Authenticated. 
Now set for Encryption */ - intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), + intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), 0, CTL_LINK_ENCRYPTION_REQ); - ret = intel_de_wait_for_set(dev_priv, - HDCP2_STATUS(dev_priv, cpu_transcoder, + ret = intel_de_wait_for_set(i915, + HDCP2_STATUS(i915, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); @@ -1845,31 +1809,31 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) static int hdcp2_disable_encryption(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)); - intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), + intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), CTL_LINK_ENCRYPTION_REQ, 0); - ret = intel_de_wait_for_clear(dev_priv, - HDCP2_STATUS(dev_priv, cpu_transcoder, + ret = intel_de_wait_for_clear(i915, + HDCP2_STATUS(i915, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) - drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout"); + drm_dbg_kms(&i915->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { - drm_err(&dev_priv->drm, + drm_err(&i915->drm, "Failed to disable HDCP signalling. 
%d\n", ret); return ret; @@ -1917,13 +1881,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { ret = hdcp2_authenticate_sink(connector); if (!ret) { - ret = intel_hdcp_prepare_streams(connector); - if (ret) { - drm_dbg_kms(&i915->drm, - "Prepare streams failed.(%d)\n", - ret); - break; - } + intel_hdcp_prepare_streams(connector); ret = hdcp2_propagate_stream_management_info(connector); if (ret) { @@ -2035,7 +1993,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery static int intel_hdcp2_check_link(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; @@ -2052,11 +2010,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - if (drm_WARN_ON(&dev_priv->drm, - !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) { - drm_err(&dev_priv->drm, + if (drm_WARN_ON(&i915->drm, + !intel_hdcp2_in_use(i915, cpu_transcoder, port))) { + drm_err(&i915->drm, "HDCP2.2 link stopped the encryption, %x\n", - intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port))); + intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port))); ret = -ENXIO; _intel_hdcp2_disable(connector, true); intel_hdcp_update_value(connector, @@ -2079,7 +2037,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) goto out; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "HDCP2.2 Downstream topology change\n"); ret = hdcp2_authenticate_repeater_topology(connector); if (!ret) { @@ -2088,19 +2046,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) true); goto out; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "[%s:%d] Repeater topology auth failed.(%d)\n", connector->base.name, connector->base.base.id, ret); } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 link failed, retrying auth\n", connector->base.name, connector->base.base.id); } ret = _intel_hdcp2_disable(connector, true); if (ret) { - drm_err(&dev_priv->drm, + drm_err(&i915->drm, "[%s:%d] Failed to disable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); intel_hdcp_update_value(connector, @@ -2110,7 +2068,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) ret = _intel_hdcp2_enable(connector); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "[%s:%d] Failed to enable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); @@ -2147,13 +2105,13 @@ static void intel_hdcp_check_work(struct work_struct *work) static int i915_hdcp_component_bind(struct device *i915_kdev, struct device *mei_kdev, void *data) { - struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); - drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n"); - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data; - dev_priv->display.hdcp.master->hdcp_dev = mei_kdev; - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + drm_dbg(&i915->drm, "I915 HDCP comp bind\n"); + mutex_lock(&i915->display.hdcp.hdcp_mutex); + 
i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data; + i915->display.hdcp.arbiter->hdcp_dev = mei_kdev; + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return 0; } @@ -2161,12 +2119,12 @@ static int i915_hdcp_component_bind(struct device *i915_kdev, static void i915_hdcp_component_unbind(struct device *i915_kdev, struct device *mei_kdev, void *data) { - struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); - drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n"); - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - dev_priv->display.hdcp.master = NULL; - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + drm_dbg(&i915->drm, "I915 HDCP comp unbind\n"); + mutex_lock(&i915->display.hdcp.hdcp_mutex); + i915->display.hdcp.arbiter = NULL; + mutex_unlock(&i915->display.hdcp.hdcp_mutex); } static const struct component_ops i915_hdcp_ops = { @@ -2200,12 +2158,11 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(i915) < 12) data->hdcp_ddi = intel_get_hdcp_ddi_index(port); else /* @@ -2225,58 +2182,55 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, data->protocol = (u8)shim->protocol; if (!data->streams) - data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv), + data->streams = kcalloc(INTEL_NUM_PIPES(i915), sizeof(struct hdcp2_streamid_type), GFP_KERNEL); if (!data->streams) { - drm_err(&dev_priv->drm, "Out of Memory\n"); + drm_err(&i915->drm, "Out of Memory\n"); return -ENOMEM; } - /* For SST */ - data->streams[0].stream_id = 0; - data->streams[0].stream_type = hdcp->content_type; return 0; } -static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) +static bool is_hdcp2_supported(struct drm_i915_private *i915) { - if (intel_hdcp_gsc_cs_required(dev_priv)) + if (intel_hdcp_gsc_cs_required(i915)) return true; if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) return false; - return (DISPLAY_VER(dev_priv) >= 10 || - IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)); + return (DISPLAY_VER(i915) >= 10 || + IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)); } -void intel_hdcp_component_init(struct drm_i915_private *dev_priv) +void intel_hdcp_component_init(struct drm_i915_private *i915) { int ret; - if (!is_hdcp2_supported(dev_priv)) + if (!is_hdcp2_supported(i915)) return; - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added); + mutex_lock(&i915->display.hdcp.hdcp_mutex); + drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added); - dev_priv->display.hdcp.comp_added = true; - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); - if (intel_hdcp_gsc_cs_required(dev_priv)) - ret = intel_hdcp_gsc_init(dev_priv); + i915->display.hdcp.comp_added = true; + mutex_unlock(&i915->display.hdcp.hdcp_mutex); + if (intel_hdcp_gsc_cs_required(i915)) + ret = intel_hdcp_gsc_init(i915); else - ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops, + ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops, I915_COMPONENT_HDCP); if (ret < 0) { - drm_dbg_kms(&dev_priv->drm, "Failed at fw component 
add(%d)\n", + drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n", ret); - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - dev_priv->display.hdcp.comp_added = false; - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_lock(&i915->display.hdcp.hdcp_mutex); + i915->display.hdcp.comp_added = false; + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return; } } @@ -2302,14 +2256,14 @@ int intel_hdcp_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; if (!shim) return -EINVAL; - if (is_hdcp2_supported(dev_priv)) + if (is_hdcp2_supported(i915)) intel_hdcp2_init(connector, dig_port, shim); ret = @@ -2330,10 +2284,60 @@ int intel_hdcp_init(struct intel_connector *connector, return 0; } -int intel_hdcp_enable(struct intel_connector *connector, - const struct intel_crtc_state *pipe_config, u8 content_type) +static int +intel_hdcp_set_streams(struct intel_digital_port *dig_port, + struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_connector_list_iter conn_iter; + struct intel_digital_port *conn_dig_port; + struct intel_connector *connector; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct hdcp_port_data *data = &dig_port->hdcp_port_data; + + if (!intel_encoder_is_mst(&dig_port->base)) { + data->k = 1; + data->streams[0].stream_id = 0; + return 0; + } + + data->k = 0; + + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + if (connector->base.status == connector_status_disconnected) + continue; + + if (!intel_encoder_is_mst(intel_attached_encoder(connector))) + continue; + + conn_dig_port = intel_attached_dig_port(connector); + if (conn_dig_port != dig_port) + continue; + + data->streams[data->k].stream_id = + intel_conn_to_vcpi(&state->base, connector); + data->k++; + + /* if there is only one active stream */ + if (dig_port->dp.active_mst_links <= 1) + break; + } + drm_connector_list_iter_end(&conn_iter); + + if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) + return -EINVAL; + + return 0; +} + +int intel_hdcp_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_connector *connector = + to_intel_connector(conn_state->connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; @@ -2343,16 +2347,16 @@ int intel_hdcp_enable(struct intel_connector *connector, return -ENOENT; if (!connector->encoder) { - drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n", + drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n", connector->base.name, connector->base.base.id); return -ENODEV; } mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp_mutex); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(&i915->drm, hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); - hdcp->content_type = content_type; + hdcp->content_type = (u8)conn_state->content_type; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { hdcp->cpu_transcoder = 
pipe_config->mst_master_transcoder; @@ -2362,7 +2366,7 @@ int intel_hdcp_enable(struct intel_connector *connector, hdcp->stream_transcoder = INVALID_TRANSCODER; } - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(i915) >= 12) dig_port->hdcp_port_data.hdcp_transcoder = intel_get_hdcp_transcoder(hdcp->cpu_transcoder); @@ -2371,9 +2375,17 @@ int intel_hdcp_enable(struct intel_connector *connector, * is capable of HDCP2.2, it is preferred to use HDCP2.2. */ if (intel_hdcp2_capable(connector)) { - ret = _intel_hdcp2_enable(connector); - if (!ret) - check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS; + ret = intel_hdcp_set_streams(dig_port, state); + if (!ret) { + ret = _intel_hdcp2_enable(connector); + if (!ret) + check_link_interval = + DRM_HDCP2_CHECK_PERIOD_MS; + } else { + drm_dbg_kms(&i915->drm, + "Set content streams failed: (%d)\n", + ret); + } } /* @@ -2483,26 +2495,24 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, } if (desired_and_not_enabled || content_protection_type_changed) - intel_hdcp_enable(connector, - crtc_state, - (u8)conn_state->hdcp_content_type); + intel_hdcp_enable(state, encoder, crtc_state, conn_state); } -void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) +void intel_hdcp_component_fini(struct drm_i915_private *i915) { - mutex_lock(&dev_priv->display.hdcp.comp_mutex); - if (!dev_priv->display.hdcp.comp_added) { - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + mutex_lock(&i915->display.hdcp.hdcp_mutex); + if (!i915->display.hdcp.comp_added) { + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return; } - dev_priv->display.hdcp.comp_added = false; - mutex_unlock(&dev_priv->display.hdcp.comp_mutex); + i915->display.hdcp.comp_added = false; + mutex_unlock(&i915->display.hdcp.hdcp_mutex); - if (intel_hdcp_gsc_cs_required(dev_priv)) - intel_hdcp_gsc_fini(dev_priv); + if (intel_hdcp_gsc_cs_required(i915)) + intel_hdcp_gsc_fini(i915); else - component_del(dev_priv->drm.dev, &i915_hdcp_ops); + component_del(i915->drm.dev, &i915_hdcp_ops); } void intel_hdcp_cleanup(struct intel_connector *connector) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 8f53b0c7fe5c..5997c52a0958 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -28,18 +28,20 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, int intel_hdcp_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *hdcp_shim); -int intel_hdcp_enable(struct intel_connector *connector, - const struct intel_crtc_state *pipe_config, u8 content_type); +int intel_hdcp_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state); int intel_hdcp_disable(struct intel_connector *connector); void intel_hdcp_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); +bool is_hdcp_supported(struct drm_i915_private *i915, enum port port); bool intel_hdcp_capable(struct intel_connector *connector); bool intel_hdcp2_capable(struct intel_connector *connector); -void intel_hdcp_component_init(struct drm_i915_private *dev_priv); -void intel_hdcp_component_fini(struct drm_i915_private *dev_priv); +void intel_hdcp_component_init(struct drm_i915_private *i915); 
+void intel_hdcp_component_fini(struct drm_i915_private *i915); void intel_hdcp_cleanup(struct intel_connector *connector); void intel_hdcp_handle_cp_irq(struct intel_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index 7e52aea6aa17..72573ce1d0e9 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -5,11 +5,11 @@ #include <drm/i915_hdcp_interface.h> -#include "display/intel_hdcp_gsc.h" #include "gem/i915_gem_region.h" #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" #include "i915_drv.h" #include "i915_utils.h" +#include "intel_hdcp_gsc.h" bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) { @@ -697,19 +697,19 @@ static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) int intel_hdcp_gsc_init(struct drm_i915_private *i915) { - struct i915_hdcp_master *data; + struct i915_hdcp_arbiter *data; int ret; - data = kzalloc(sizeof(struct i915_hdcp_master), GFP_KERNEL); + data = kzalloc(sizeof(struct i915_hdcp_arbiter), GFP_KERNEL); if (!data) return -ENOMEM; - mutex_lock(&i915->display.hdcp.comp_mutex); - i915->display.hdcp.master = data; - i915->display.hdcp.master->hdcp_dev = i915->drm.dev; - i915->display.hdcp.master->ops = &gsc_hdcp_ops; + mutex_lock(&i915->display.hdcp.hdcp_mutex); + i915->display.hdcp.arbiter = data; + i915->display.hdcp.arbiter->hdcp_dev = i915->drm.dev; + i915->display.hdcp.arbiter->ops = &gsc_hdcp_ops; ret = intel_hdcp_gsc_hdcp2_init(i915); - mutex_unlock(&i915->display.hdcp.comp_mutex); + mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } @@ -717,7 +717,7 @@ int intel_hdcp_gsc_init(struct drm_i915_private *i915) void intel_hdcp_gsc_fini(struct drm_i915_private *i915) { intel_hdcp_gsc_free_message(i915); - kfree(i915->display.hdcp.master); + kfree(i915->display.hdcp.arbiter); } static int intel_gsc_send_sync(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index a690a5616506..7ac5e6c5e00d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -40,12 +40,13 @@ #include <drm/drm_edid.h> #include <drm/intel_lpe_audio.h> -#include "i915_debugfs.h" +#include "g4x_hdmi.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" +#include "intel_cx0_phy.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" @@ -58,7 +59,7 @@ #include "intel_panel.h" #include "intel_snps_phy.h" -static struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi) +inline struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi) { return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev); } @@ -1789,7 +1790,9 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi, const struct drm_connector_state *conn_state) { - return hdmi->has_hdmi_sink && + struct intel_connector *connector = hdmi->attached_connector; + + return connector->base.display_info.is_hdmi && READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } @@ -1865,16 +1868,19 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, * FIXME: We will hopefully get an algorithmic way of programming * the MPLLB for HDMI in the future. 
*/ - if (IS_DG2(dev_priv)) + if (DISPLAY_VER(dev_priv) >= 14) + return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock); + else if (IS_DG2(dev_priv)) return intel_snps_phy_check_hdmi_link_rate(clock); return MODE_OK; } -int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) +int intel_hdmi_tmds_clock(int clock, int bpc, + enum intel_output_format sink_format) { /* YCBCR420 TMDS rate requirement is half the pixel clock */ - if (ycbcr420_output) + if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) clock /= 2; /* @@ -1901,7 +1907,8 @@ static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bp } static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, - int bpc, bool has_hdmi_sink, bool ycbcr420_output) + int bpc, bool has_hdmi_sink, + enum intel_output_format sink_format) { const struct drm_display_info *info = &connector->display_info; const struct drm_hdmi_info *hdmi = &info->hdmi; @@ -1911,7 +1918,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, if (!has_hdmi_sink) return false; - if (ycbcr420_output) + if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36; else return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36; @@ -1919,7 +1926,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, if (!has_hdmi_sink) return false; - if (ycbcr420_output) + if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30; else return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30; @@ -1933,7 +1940,8 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, static enum drm_mode_status intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, - bool has_hdmi_sink, bool ycbcr420_output) + bool has_hdmi_sink, + enum intel_output_format sink_format) { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); @@ -1946,12 +1954,12 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, * least one color depth is accepted. 
*/ for (bpc = 12; bpc >= 8; bpc -= 2) { - int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output); + int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); if (!intel_hdmi_source_bpc_possible(i915, bpc)) continue; - if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) + if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format)) continue; status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink); @@ -1976,6 +1984,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state); bool ycbcr_420_only; + enum intel_output_format sink_format; if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) clock *= 2; @@ -2000,14 +2009,20 @@ intel_hdmi_mode_valid(struct drm_connector *connector, ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode); - status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only); + if (ycbcr_420_only) + sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; + else + sink_format = INTEL_OUTPUT_FORMAT_RGB; + + status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format); if (status != MODE_OK) { if (ycbcr_420_only || !connector->ycbcr_420_allowed || !drm_mode_is_420_also(&connector->display_info, mode)) return status; - status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, true); + sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; + status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format); if (status != MODE_OK) return status; } @@ -2016,7 +2031,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, } bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, - int bpc, bool has_hdmi_sink, bool ycbcr420_output) + int bpc, bool has_hdmi_sink) { struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_connector_state *connector_state; @@ -2027,7 +2042,8 @@ bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, if (connector_state->crtc != crtc_state->uapi.crtc) continue; - if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) + if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, + crtc_state->sink_format)) return false; } @@ -2051,8 +2067,7 @@ static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc adjusted_mode->crtc_hblank_start) % 8 == 2) return false; - return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink, - intel_hdmi_is_ycbcr420(crtc_state)); + return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink); } static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, @@ -2060,7 +2075,6 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, int clock, bool respect_downstream_limits) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state); int bpc; /* @@ -2078,7 +2092,8 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, bpc = 8; for (; bpc >= 8; bpc -= 2) { - int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output); + int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, + crtc_state->sink_format); if (hdmi_bpc_possible(crtc_state, bpc) && hdmi_port_clock_valid(intel_hdmi, tmds_clock, @@ -2108,7 +2123,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, return bpc; crtc_state->port_clock = - 
intel_hdmi_tmds_clock(clock, bpc, intel_hdmi_is_ycbcr420(crtc_state)); + intel_hdmi_tmds_clock(clock, bpc, crtc_state->sink_format); /* * pipe_bpp could already be below 8bpc due to @@ -2156,7 +2171,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct drm_connector *connector = conn_state->connector; const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); @@ -2164,15 +2179,15 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder, return false; if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - return intel_hdmi->has_audio; + return connector->display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } static enum intel_output_format -intel_hdmi_output_format(const struct intel_crtc_state *crtc_state, - struct intel_connector *connector, - bool ycbcr_420_output) +intel_hdmi_sink_format(const struct intel_crtc_state *crtc_state, + struct intel_connector *connector, + bool ycbcr_420_output) { if (!crtc_state->has_hdmi_sink) return INTEL_OUTPUT_FORMAT_RGB; @@ -2183,6 +2198,12 @@ intel_hdmi_output_format(const struct intel_crtc_state *crtc_state, return INTEL_OUTPUT_FORMAT_RGB; } +static enum intel_output_format +intel_hdmi_output_format(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->sink_format; +} + static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, @@ -2195,23 +2216,26 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); int ret; - crtc_state->output_format = - intel_hdmi_output_format(crtc_state, connector, ycbcr_420_only); + crtc_state->sink_format = + intel_hdmi_sink_format(crtc_state, connector, ycbcr_420_only); - if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) { + if (ycbcr_420_only && crtc_state->sink_format != INTEL_OUTPUT_FORMAT_YCBCR420) { drm_dbg_kms(&i915->drm, "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. 
Falling back to RGB.\n"); - crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; + crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; } + crtc_state->output_format = intel_hdmi_output_format(crtc_state); ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); if (ret) { - if (intel_hdmi_is_ycbcr420(crtc_state) || + if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || + !crtc_state->has_hdmi_sink || !connector->base.ycbcr_420_allowed || !drm_mode_is_420_also(info, adjusted_mode)) return ret; - crtc_state->output_format = intel_hdmi_output_format(crtc_state, connector, true); + crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; + crtc_state->output_format = intel_hdmi_output_format(crtc_state); ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); } @@ -2243,11 +2267,20 @@ static bool source_supports_scrambling(struct intel_encoder *encoder) return intel_hdmi_source_max_tmds_clock(encoder) > 340000; } +bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); + + return intel_has_hdmi_sink(hdmi, conn_state) && + !intel_hdmi_is_cloned(crtc_state); +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; @@ -2262,9 +2295,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - pipe_config->has_hdmi_sink = - intel_has_hdmi_sink(intel_hdmi, conn_state) && - !intel_hdmi_is_cloned(pipe_config); if (pipe_config->has_hdmi_sink) pipe_config->has_infoframe = true; @@ -2357,9 +2387,6 @@ intel_hdmi_unset_edid(struct drm_connector *connector) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); - intel_hdmi->has_hdmi_sink = false; - intel_hdmi->has_audio = false; - intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; intel_hdmi->dp_dual_mode.max_tmds_clock = 0; @@ -2372,7 +2399,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); - enum port port = hdmi_to_dig_port(hdmi)->base.port; + struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base; struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(&dev_priv->drm, adapter); @@ -2388,7 +2415,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) */ if (type == DRM_DP_DUAL_MODE_UNKNOWN) { if (!connector->force && - intel_bios_is_port_dp_dual_mode(dev_priv, port)) { + intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) { drm_dbg_kms(&dev_priv->drm, "Assuming DP dual mode adaptor presence based on VBT\n"); type = DRM_DP_DUAL_MODE_TYPE1_DVI; @@ -2411,7 +2438,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) /* Older VBTs are often buggy and can't be trusted :( Play it safe. 
*/ if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && - !intel_bios_is_port_dp_dual_mode(dev_priv, port)) { + !intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) { drm_dbg_kms(&dev_priv->drm, "Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n"); hdmi->dp_dual_mode.max_tmds_clock = 0; @@ -2451,9 +2478,6 @@ intel_hdmi_set_edid(struct drm_connector *connector) /* FIXME: Get rid of drm_edid_raw() */ edid = drm_edid_raw(drm_edid); if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { - intel_hdmi->has_audio = drm_detect_monitor_audio(edid); - intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); - intel_hdmi_dp_dual_mode_detect(connector); connected = true; @@ -2597,10 +2621,21 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; +static int intel_hdmi_connector_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->dev); + + if (HAS_DDI(i915)) + return intel_digital_connector_atomic_check(connector, state); + else + return g4x_hdmi_connector_atomic_check(connector, state); +} + static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, - .atomic_check = intel_digital_connector_atomic_check, + .atomic_check = intel_hdmi_connector_atomic_check, }; static void diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 774dda2376ed..6b39df38d57a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -9,6 +9,7 @@ #include <linux/types.h> enum hdmi_infoframe_type; +enum intel_output_format; enum port; struct drm_connector; struct drm_connector_state; @@ -23,6 +24,9 @@ union hdmi_infoframe; void intel_hdmi_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector); +bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); @@ -45,8 +49,8 @@ void intel_read_infoframe(struct intel_encoder *encoder, bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, - int bpc, bool has_hdmi_sink, bool ycbcr420_output); -int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output); + int bpc, bool has_hdmi_sink); +int intel_hdmi_tmds_clock(int clock, int bpc, enum intel_output_format sink_format); int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices, int output_format, bool hdmi_all_bpp, int hdmi_max_chunk_bytes); @@ -54,5 +58,6 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state, int src_max_slices, int src_max_slice_width, int hdmi_max_slices, int hdmi_throughput); int intel_hdmi_dsc_get_slice_height(int vactive); +struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi); #endif /* __INTEL_HDMI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index b12900446828..23a5e1a875f1 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ 
b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -27,6 +27,7 @@ #include "i915_irq.h" #include "intel_display_types.h" #include "intel_hotplug.h" +#include "intel_hotplug_irq.h" /** * DOC: Hotplug diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c new file mode 100644 index 000000000000..f95fa793fabb --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -0,0 +1,1442 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_de.h" +#include "intel_display_irq.h" +#include "intel_display_types.h" +#include "intel_dp_aux.h" +#include "intel_gmbus.h" +#include "intel_hotplug.h" +#include "intel_hotplug_irq.h" + +typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); +typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder); +typedef u32 (*hotplug_mask_func)(enum hpd_pin pin); + +static const u32 hpd_ilk[HPD_NUM_PINS] = { + [HPD_PORT_A] = DE_DP_A_HOTPLUG, +}; + +static const u32 hpd_ivb[HPD_NUM_PINS] = { + [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, +}; + +static const u32 hpd_bdw[HPD_NUM_PINS] = { + [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), +}; + +static const u32 hpd_ibx[HPD_NUM_PINS] = { + [HPD_CRT] = SDE_CRT_HOTPLUG, + [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, + [HPD_PORT_B] = SDE_PORTB_HOTPLUG, + [HPD_PORT_C] = SDE_PORTC_HOTPLUG, + [HPD_PORT_D] = SDE_PORTD_HOTPLUG, +}; + +static const u32 hpd_cpt[HPD_NUM_PINS] = { + [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, + [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, + [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, + [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, + [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, +}; + +static const u32 hpd_spt[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, + [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, + [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, + [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, + [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT, +}; + +static const u32 hpd_mask_i915[HPD_NUM_PINS] = { + [HPD_CRT] = CRT_HOTPLUG_INT_EN, + [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, + [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, + [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, + [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, + [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN, +}; + +static const u32 hpd_status_g4x[HPD_NUM_PINS] = { + [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, + [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, + [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, + [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, + [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, + [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, +}; + +static const u32 hpd_status_i915[HPD_NUM_PINS] = { + [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, + [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, + [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, + [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, + [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, + [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, +}; + +static const u32 hpd_bxt[HPD_NUM_PINS] = { + [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), + [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B), + [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C), +}; + +static const u32 hpd_gen11[HPD_NUM_PINS] = { + [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1), + [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2), + [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3), + [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4), + [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | 
GEN11_TBT_HOTPLUG(HPD_PORT_TC5), + [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6), +}; + +static const u32 hpd_xelpdp[HPD_NUM_PINS] = { + [HPD_PORT_TC1] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC1) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC1), + [HPD_PORT_TC2] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC2) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC2), + [HPD_PORT_TC3] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC3) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC3), + [HPD_PORT_TC4] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC4) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC4), +}; + +static const u32 hpd_icp[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), + [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), + [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), + [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2), + [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3), + [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), + [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5), + [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6), +}; + +static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), + [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), + [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D), + [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1), +}; + +static const u32 hpd_mtp[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), + [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), + [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2), + [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3), + [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), +}; + +static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) +{ + struct intel_hotplug *hpd = &dev_priv->display.hotplug; + + if (HAS_GMCH(dev_priv)) { + if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) + hpd->hpd = hpd_status_g4x; + else + hpd->hpd = hpd_status_i915; + return; + } + + if (DISPLAY_VER(dev_priv) >= 14) + hpd->hpd = hpd_xelpdp; + else if (DISPLAY_VER(dev_priv) >= 11) + hpd->hpd = hpd_gen11; + else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + hpd->hpd = hpd_bxt; + else if (DISPLAY_VER(dev_priv) == 9) + hpd->hpd = NULL; /* no north HPD on SKL */ + else if (DISPLAY_VER(dev_priv) >= 8) + hpd->hpd = hpd_bdw; + else if (DISPLAY_VER(dev_priv) >= 7) + hpd->hpd = hpd_ivb; + else + hpd->hpd = hpd_ilk; + + if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) && + (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))) + return; + + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + hpd->pch_hpd = hpd_sde_dg1; + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP) + hpd->pch_hpd = hpd_mtp; + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + hpd->pch_hpd = hpd_icp; + else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) + hpd->pch_hpd = hpd_spt; + else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv)) + hpd->pch_hpd = hpd_cpt; + else if (HAS_PCH_IBX(dev_priv)) + hpd->pch_hpd = hpd_ibx; + else + MISSING_CASE(INTEL_PCH_TYPE(dev_priv)); +} + +/* For display hotplug interrupt */ +void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, + u32 mask, u32 bits) +{ + lockdep_assert_held(&dev_priv->irq_lock); + drm_WARN_ON(&dev_priv->drm, bits & ~mask); + + intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits); +} + +/** + * i915_hotplug_interrupt_update - update hotplug interrupt enable + * @dev_priv: driver private + * @mask: bits to update + * @bits: 
bits to enable + * NOTE: the HPD enable bits are modified both inside and outside + * of an interrupt context. To avoid that read-modify-write cycles + * interfer, these bits are protected by a spinlock. Since this + * function is usually not called from a context where the lock is + * held already, this function acquires the lock itself. A non-locking + * version is also available. + */ +void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, + u32 mask, + u32 bits) +{ + spin_lock_irq(&dev_priv->irq_lock); + i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); + spin_unlock_irq(&dev_priv->irq_lock); +} + +static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_TC1: + case HPD_PORT_TC2: + case HPD_PORT_TC3: + case HPD_PORT_TC4: + case HPD_PORT_TC5: + case HPD_PORT_TC6: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin); + default: + return false; + } +} + +static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_A: + return val & PORTA_HOTPLUG_LONG_DETECT; + case HPD_PORT_B: + return val & PORTB_HOTPLUG_LONG_DETECT; + case HPD_PORT_C: + return val & PORTC_HOTPLUG_LONG_DETECT; + default: + return false; + } +} + +static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_A: + case HPD_PORT_B: + case HPD_PORT_C: + case HPD_PORT_D: + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin); + default: + return false; + } +} + +static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_TC1: + case HPD_PORT_TC2: + case HPD_PORT_TC3: + case HPD_PORT_TC4: + case HPD_PORT_TC5: + case HPD_PORT_TC6: + return val & ICP_TC_HPD_LONG_DETECT(pin); + default: + return false; + } +} + +static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_E: + return val & PORTE_HOTPLUG_LONG_DETECT; + default: + return false; + } +} + +static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_A: + return val & PORTA_HOTPLUG_LONG_DETECT; + case HPD_PORT_B: + return val & PORTB_HOTPLUG_LONG_DETECT; + case HPD_PORT_C: + return val & PORTC_HOTPLUG_LONG_DETECT; + case HPD_PORT_D: + return val & PORTD_HOTPLUG_LONG_DETECT; + default: + return false; + } +} + +static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_A: + return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; + default: + return false; + } +} + +static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_B: + return val & PORTB_HOTPLUG_LONG_DETECT; + case HPD_PORT_C: + return val & PORTC_HOTPLUG_LONG_DETECT; + case HPD_PORT_D: + return val & PORTD_HOTPLUG_LONG_DETECT; + default: + return false; + } +} + +static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_B: + return val & PORTB_HOTPLUG_INT_LONG_PULSE; + case HPD_PORT_C: + return val & PORTC_HOTPLUG_INT_LONG_PULSE; + case HPD_PORT_D: + return val & PORTD_HOTPLUG_INT_LONG_PULSE; + default: + return false; + } +} + +/* + * Get a bit mask of pins that have triggered, and which ones may be long. + * This can be called multiple times with the same masks to accumulate + * hotplug detection results from several registers. + * + * Note that the caller is expected to zero out the masks initially. 
+ */ +static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, + u32 *pin_mask, u32 *long_mask, + u32 hotplug_trigger, u32 dig_hotplug_reg, + const u32 hpd[HPD_NUM_PINS], + bool long_pulse_detect(enum hpd_pin pin, u32 val)) +{ + enum hpd_pin pin; + + BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS); + + for_each_hpd_pin(pin) { + if ((hpd[pin] & hotplug_trigger) == 0) + continue; + + *pin_mask |= BIT(pin); + + if (long_pulse_detect(pin, dig_hotplug_reg)) + *long_mask |= BIT(pin); + } + + drm_dbg(&dev_priv->drm, + "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); +} + +static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, + const u32 hpd[HPD_NUM_PINS]) +{ + struct intel_encoder *encoder; + u32 enabled_irqs = 0; + + for_each_intel_encoder(&dev_priv->drm, encoder) + if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) + enabled_irqs |= hpd[encoder->hpd_pin]; + + return enabled_irqs; +} + +static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, + const u32 hpd[HPD_NUM_PINS]) +{ + struct intel_encoder *encoder; + u32 hotplug_irqs = 0; + + for_each_intel_encoder(&dev_priv->drm, encoder) + hotplug_irqs |= hpd[encoder->hpd_pin]; + + return hotplug_irqs; +} + +static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915, + hotplug_mask_func hotplug_mask) +{ + enum hpd_pin pin; + u32 hotplug = 0; + + for_each_hpd_pin(pin) + hotplug |= hotplug_mask(pin); + + return hotplug; +} + +static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, + hotplug_enables_func hotplug_enables) +{ + struct intel_encoder *encoder; + u32 hotplug = 0; + + for_each_intel_encoder(&i915->drm, encoder) + hotplug |= hotplug_enables(encoder); + + return hotplug; +} + +u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) +{ + u32 hotplug_status = 0, hotplug_status_mask; + int i; + + if (IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | + DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; + else + hotplug_status_mask = HOTPLUG_INT_STATUS_I915; + + /* + * We absolutely have to clear all the pending interrupt + * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port + * interrupt bit won't have an edge, and the i965/g4x + * edge triggered IIR will not notice that an interrupt + * is still pending. 
We can't use PORT_HOTPLUG_EN to + * guarantee the edge as the act of toggling the enable + * bits can itself generate a new hotplug interrupt :( + */ + for (i = 0; i < 10; i++) { + u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask; + + if (tmp == 0) + return hotplug_status; + + hotplug_status |= tmp; + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status); + } + + drm_WARN_ONCE(&dev_priv->drm, 1, + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", + intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); + + return hotplug_status; +} + +void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) +{ + u32 pin_mask = 0, long_mask = 0; + u32 hotplug_trigger; + + if (IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; + else + hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + + if (hotplug_trigger) { + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + hotplug_trigger, hotplug_trigger, + dev_priv->display.hotplug.hpd, + i9xx_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + } + + if ((IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) + intel_dp_aux_irq_handler(dev_priv); +} + +void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +{ + u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; + + /* + * Somehow the PCH doesn't seem to really ack the interrupt to the CPU + * unless we touch the hotplug register, even if hotplug_trigger is + * zero. Not acking leads to "The master control interrupt lied (SDE)!" + * errors. + */ + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); + if (!hotplug_trigger) { + u32 mask = PORTA_HOTPLUG_STATUS_MASK | + PORTD_HOTPLUG_STATUS_MASK | + PORTC_HOTPLUG_STATUS_MASK | + PORTB_HOTPLUG_STATUS_MASK; + dig_hotplug_reg &= ~mask; + } + + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); + if (!hotplug_trigger) + return; + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + hotplug_trigger, dig_hotplug_reg, + dev_priv->display.hotplug.pch_hpd, + pch_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); +} + +void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) +{ + enum hpd_pin pin; + u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK); + u32 trigger_aux = iir & XELPDP_AUX_TC_MASK; + u32 pin_mask = 0, long_mask = 0; + + for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) { + u32 val; + + if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger)) + continue; + + pin_mask |= BIT(pin); + + val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin)); + intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val); + + if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT)) + long_mask |= BIT(pin); + } + + if (pin_mask) { + drm_dbg(&i915->drm, + "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, pin_mask, long_mask); + + intel_hpd_irq_handler(i915, pin_mask, long_mask); + } + + if (trigger_aux) + intel_dp_aux_irq_handler(i915); + + if (!pin_mask && !trigger_aux) + drm_err(&i915->drm, + "Unexpected DE HPD/AUX interrupt 0x%08x\n", iir); +} + +void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +{ + u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; + u32 
tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; + u32 pin_mask = 0, long_mask = 0; + + if (ddi_hotplug_trigger) { + u32 dig_hotplug_reg; + + /* Locking due to DSI native GPIO sequences */ + spin_lock(&dev_priv->irq_lock); + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0); + spin_unlock(&dev_priv->irq_lock); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + ddi_hotplug_trigger, dig_hotplug_reg, + dev_priv->display.hotplug.pch_hpd, + icp_ddi_port_hotplug_long_detect); + } + + if (tc_hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + tc_hotplug_trigger, dig_hotplug_reg, + dev_priv->display.hotplug.pch_hpd, + icp_tc_port_hotplug_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + + if (pch_iir & SDE_GMBUS_ICP) + intel_gmbus_irq_handler(dev_priv); +} + +void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +{ + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & + ~SDE_PORTE_HOTPLUG_SPT; + u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; + u32 pin_mask = 0, long_mask = 0; + + if (hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + hotplug_trigger, dig_hotplug_reg, + dev_priv->display.hotplug.pch_hpd, + spt_port_hotplug_long_detect); + } + + if (hotplug2_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + hotplug2_trigger, dig_hotplug_reg, + dev_priv->display.hotplug.pch_hpd, + spt_port_hotplug2_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + + if (pch_iir & SDE_GMBUS_CPT) + intel_gmbus_irq_handler(dev_priv); +} + +void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +{ + u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; + + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + hotplug_trigger, dig_hotplug_reg, + dev_priv->display.hotplug.hpd, + ilk_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); +} + +void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +{ + u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; + + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + hotplug_trigger, dig_hotplug_reg, + dev_priv->display.hotplug.hpd, + bxt_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); +} + +void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) +{ + u32 pin_mask = 0, long_mask = 0; + u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; + u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; + + if (trigger_tc) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + trigger_tc, dig_hotplug_reg, + dev_priv->display.hotplug.hpd, + gen11_port_hotplug_long_detect); + } + + if (trigger_tbt) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0); + + intel_get_hpd_pins(dev_priv, &pin_mask, 
&long_mask, + trigger_tbt, dig_hotplug_reg, + dev_priv->display.hotplug.hpd, + gen11_port_hotplug_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + else + drm_err(&dev_priv->drm, + "Unexpected DE HPD interrupt 0x%08x\n", iir); +} + +static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_A: + return PORTA_HOTPLUG_ENABLE; + case HPD_PORT_B: + return PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_MASK; + case HPD_PORT_C: + return PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_MASK; + case HPD_PORT_D: + return PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_MASK; + default: + return 0; + } +} + +static u32 ibx_hotplug_enables(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + switch (encoder->hpd_pin) { + case HPD_PORT_A: + /* + * When CPU and PCH are on the same package, port A + * HPD must be enabled in both north and south. + */ + return HAS_PCH_LPT_LP(i915) ? + PORTA_HOTPLUG_ENABLE : 0; + case HPD_PORT_B: + return PORTB_HOTPLUG_ENABLE | + PORTB_PULSE_DURATION_2ms; + case HPD_PORT_C: + return PORTC_HOTPLUG_ENABLE | + PORTC_PULSE_DURATION_2ms; + case HPD_PORT_D: + return PORTD_HOTPLUG_ENABLE | + PORTD_PULSE_DURATION_2ms; + default: + return 0; + } +} + +static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + /* + * Enable digital hotplug on the PCH, and configure the DP short pulse + * duration to 2ms (which is the minimum in the Display Port spec). + * The pulse duration bits are reserved on LPT+. + */ + intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); +} + +static void ibx_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, + ibx_hotplug_mask(encoder->hpd_pin), + ibx_hotplug_enables(encoder)); +} + +static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + ibx_hpd_detection_setup(dev_priv); +} + +static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_A: + case HPD_PORT_B: + case HPD_PORT_C: + case HPD_PORT_D: + return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin); + default: + return 0; + } +} + +static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder) +{ + return icp_ddi_hotplug_mask(encoder->hpd_pin); +} + +static u32 icp_tc_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_TC1: + case HPD_PORT_TC2: + case HPD_PORT_TC3: + case HPD_PORT_TC4: + case HPD_PORT_TC5: + case HPD_PORT_TC6: + return ICP_TC_HPD_ENABLE(hpd_pin); + default: + return 0; + } +} + +static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) +{ + return icp_tc_hotplug_mask(encoder->hpd_pin); +} + +static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, + intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); +} + +static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + 
intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI, + icp_ddi_hotplug_mask(encoder->hpd_pin), + icp_ddi_hotplug_enables(encoder)); +} + +static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, + intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); +} + +static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC, + icp_tc_hotplug_mask(encoder->hpd_pin), + icp_tc_hotplug_enables(encoder)); +} + +static void icp_hpd_enable_detection(struct intel_encoder *encoder) +{ + icp_ddi_hpd_enable_detection(encoder); + icp_tc_hpd_enable_detection(encoder); +} + +static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + + if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + icp_ddi_hpd_detection_setup(dev_priv); + icp_tc_hpd_detection_setup(dev_priv); +} + +static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_TC1: + case HPD_PORT_TC2: + case HPD_PORT_TC3: + case HPD_PORT_TC4: + case HPD_PORT_TC5: + case HPD_PORT_TC6: + return GEN11_HOTPLUG_CTL_ENABLE(hpd_pin); + default: + return 0; + } +} + +static u32 gen11_hotplug_enables(struct intel_encoder *encoder) +{ + return gen11_hotplug_mask(encoder->hpd_pin); +} + +static void dg1_hpd_invert(struct drm_i915_private *i915) +{ + u32 val = (INVERT_DDIA_HPD | + INVERT_DDIB_HPD | + INVERT_DDIC_HPD | + INVERT_DDID_HPD); + intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val); +} + +static void dg1_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + dg1_hpd_invert(i915); + icp_hpd_enable_detection(encoder); +} + +static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + dg1_hpd_invert(dev_priv); + icp_hpd_irq_setup(dev_priv); +} + +static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, + intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); +} + +static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL, + gen11_hotplug_mask(encoder->hpd_pin), + gen11_hotplug_enables(encoder)); +} + +static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, + intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); +} + +static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL, + gen11_hotplug_mask(encoder->hpd_pin), + gen11_hotplug_enables(encoder)); +} + +static void gen11_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = 
to_i915(encoder->base.dev); + + gen11_tc_hpd_enable_detection(encoder); + gen11_tbt_hpd_enable_detection(encoder); + + if (INTEL_PCH_TYPE(i915) >= PCH_ICP) + icp_hpd_enable_detection(encoder); +} + +static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + + intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs, + ~enabled_irqs & hotplug_irqs); + intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); + + gen11_tc_hpd_detection_setup(dev_priv); + gen11_tbt_hpd_detection_setup(dev_priv); + + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + icp_hpd_irq_setup(dev_priv); +} + +static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_A: + case HPD_PORT_B: + return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin); + default: + return 0; + } +} + +static u32 mtp_ddi_hotplug_enables(struct intel_encoder *encoder) +{ + return mtp_ddi_hotplug_mask(encoder->hpd_pin); +} + +static u32 mtp_tc_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_TC1: + case HPD_PORT_TC2: + case HPD_PORT_TC3: + case HPD_PORT_TC4: + return ICP_TC_HPD_ENABLE(hpd_pin); + default: + return 0; + } +} + +static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder) +{ + return mtp_tc_hotplug_mask(encoder->hpd_pin); +} + +static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915) +{ + intel_de_rmw(i915, SHOTPLUG_CTL_DDI, + intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask), + intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables)); +} + +static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_de_rmw(i915, SHOTPLUG_CTL_DDI, + mtp_ddi_hotplug_mask(encoder->hpd_pin), + mtp_ddi_hotplug_enables(encoder)); +} + +static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915) +{ + intel_de_rmw(i915, SHOTPLUG_CTL_TC, + intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask), + intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables)); +} + +static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_de_rmw(i915, SHOTPLUG_CTL_DDI, + mtp_tc_hotplug_mask(encoder->hpd_pin), + mtp_tc_hotplug_enables(encoder)); +} + +static void mtp_hpd_invert(struct drm_i915_private *i915) +{ + u32 val = (INVERT_DDIA_HPD | + INVERT_DDIB_HPD | + INVERT_DDIC_HPD | + INVERT_TC1_HPD | + INVERT_TC2_HPD | + INVERT_TC3_HPD | + INVERT_TC4_HPD | + INVERT_DDID_HPD_MTP | + INVERT_DDIE_HPD); + intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val); +} + +static void mtp_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + mtp_hpd_invert(i915); + mtp_ddi_hpd_enable_detection(encoder); + mtp_tc_hpd_enable_detection(encoder); +} + +static void mtp_hpd_irq_setup(struct drm_i915_private *i915) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); + + intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + + mtp_hpd_invert(i915); + ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); + + mtp_ddi_hpd_detection_setup(i915); + mtp_tc_hpd_detection_setup(i915); +} + +static bool 
is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin) +{ + return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4; +} + +static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915, + enum hpd_pin hpd_pin, bool enable) +{ + u32 mask = XELPDP_TBT_HOTPLUG_ENABLE | + XELPDP_DP_ALT_HOTPLUG_ENABLE; + + if (!is_xelpdp_pica_hpd_pin(hpd_pin)) + return; + + intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin), + mask, enable ? mask : 0); +} + +static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true); +} + +static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915) +{ + struct intel_encoder *encoder; + u32 available_pins = 0; + enum hpd_pin pin; + + BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS); + + for_each_intel_encoder(&i915->drm, encoder) + available_pins |= BIT(encoder->hpd_pin); + + for_each_hpd_pin(pin) + _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin)); +} + +static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder) +{ + xelpdp_pica_hpd_enable_detection(encoder); + mtp_hpd_enable_detection(encoder); +} + +static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd); + + intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs, + ~enabled_irqs & hotplug_irqs); + intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR); + + xelpdp_pica_hpd_detection_setup(i915); + + if (INTEL_PCH_TYPE(i915) >= PCH_MTP) + mtp_hpd_irq_setup(i915); +} + +static u32 spt_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_A: + return PORTA_HOTPLUG_ENABLE; + case HPD_PORT_B: + return PORTB_HOTPLUG_ENABLE; + case HPD_PORT_C: + return PORTC_HOTPLUG_ENABLE; + case HPD_PORT_D: + return PORTD_HOTPLUG_ENABLE; + default: + return 0; + } +} + +static u32 spt_hotplug_enables(struct intel_encoder *encoder) +{ + return spt_hotplug_mask(encoder->hpd_pin); +} + +static u32 spt_hotplug2_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_E: + return PORTE_HOTPLUG_ENABLE; + default: + return 0; + } +} + +static u32 spt_hotplug2_enables(struct intel_encoder *encoder) +{ + return spt_hotplug2_mask(encoder->hpd_pin); +} + +static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + /* Display WA #1179 WaHardHangonHotPlug: cnp */ + if (HAS_PCH_CNP(dev_priv)) { + intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, + CHASSIS_CLK_REQ_DURATION(0xf)); + } + + /* Enable digital hotplug on the PCH */ + intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); + + intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, + intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask), + intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); +} + +static void spt_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + /* Display WA #1179 WaHardHangonHotPlug: cnp */ + if (HAS_PCH_CNP(i915)) { + intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, + CHASSIS_CLK_REQ_DURATION_MASK, + CHASSIS_CLK_REQ_DURATION(0xf)); + } + + intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, + 
spt_hotplug_mask(encoder->hpd_pin), + spt_hotplug_enables(encoder)); + + intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2, + spt_hotplug2_mask(encoder->hpd_pin), + spt_hotplug2_enables(encoder)); +} + +static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + spt_hpd_detection_setup(dev_priv); +} + +static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_A: + return DIGITAL_PORTA_HOTPLUG_ENABLE | + DIGITAL_PORTA_PULSE_DURATION_MASK; + default: + return 0; + } +} + +static u32 ilk_hotplug_enables(struct intel_encoder *encoder) +{ + switch (encoder->hpd_pin) { + case HPD_PORT_A: + return DIGITAL_PORTA_HOTPLUG_ENABLE | + DIGITAL_PORTA_PULSE_DURATION_2ms; + default: + return 0; + } +} + +static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + /* + * Enable digital hotplug on the CPU, and configure the DP short pulse + * duration to 2ms (which is the minimum in the Display Port spec) + * The pulse duration bits are reserved on HSW+. + */ + intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, + intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); +} + +static void ilk_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, + ilk_hotplug_mask(encoder->hpd_pin), + ilk_hotplug_enables(encoder)); + + ibx_hpd_enable_detection(encoder); +} + +static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + + if (DISPLAY_VER(dev_priv) >= 8) + bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + else + ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); + + ilk_hpd_detection_setup(dev_priv); + + ibx_hpd_irq_setup(dev_priv); +} + +static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin) +{ + switch (hpd_pin) { + case HPD_PORT_A: + return PORTA_HOTPLUG_ENABLE | BXT_DDIA_HPD_INVERT; + case HPD_PORT_B: + return PORTB_HOTPLUG_ENABLE | BXT_DDIB_HPD_INVERT; + case HPD_PORT_C: + return PORTC_HOTPLUG_ENABLE | BXT_DDIC_HPD_INVERT; + default: + return 0; + } +} + +static u32 bxt_hotplug_enables(struct intel_encoder *encoder) +{ + u32 hotplug; + + switch (encoder->hpd_pin) { + case HPD_PORT_A: + hotplug = PORTA_HOTPLUG_ENABLE; + if (intel_bios_encoder_hpd_invert(encoder->devdata)) + hotplug |= BXT_DDIA_HPD_INVERT; + return hotplug; + case HPD_PORT_B: + hotplug = PORTB_HOTPLUG_ENABLE; + if (intel_bios_encoder_hpd_invert(encoder->devdata)) + hotplug |= BXT_DDIB_HPD_INVERT; + return hotplug; + case HPD_PORT_C: + hotplug = PORTC_HOTPLUG_ENABLE; + if (intel_bios_encoder_hpd_invert(encoder->devdata)) + hotplug |= BXT_DDIC_HPD_INVERT; + return hotplug; + default: + return 0; + } +} + +static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(dev_priv, 
bxt_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); +} + +static void bxt_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, + bxt_hotplug_mask(encoder->hpd_pin), + bxt_hotplug_enables(encoder)); +} + +static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + + bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + + bxt_hpd_detection_setup(dev_priv); +} + +static void i915_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin]; + + /* HPD sense and interrupt enable are one and the same */ + i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en); +} + +static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_en; + + lockdep_assert_held(&dev_priv->irq_lock); + + /* + * Note HDMI and DP share hotplug bits. Enable bits are the same for all + * generations. + */ + hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); + /* + * Programming the CRT detection parameters tends to generate a spurious + * hotplug event about three seconds later. So just do it once. + */ + if (IS_G4X(dev_priv)) + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + + /* Ignore TV since it's buggy */ + i915_hotplug_interrupt_update_locked(dev_priv, + HOTPLUG_INT_EN_MASK | + CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | + CRT_HOTPLUG_ACTIVATION_PERIOD_64, + hotplug_en); +} + +struct intel_hotplug_funcs { + /* Enable HPD sense and interrupts for all present encoders */ + void (*hpd_irq_setup)(struct drm_i915_private *i915); + /* Enable HPD sense for a single encoder */ + void (*hpd_enable_detection)(struct intel_encoder *encoder); +}; + +#define HPD_FUNCS(platform) \ +static const struct intel_hotplug_funcs platform##_hpd_funcs = { \ + .hpd_irq_setup = platform##_hpd_irq_setup, \ + .hpd_enable_detection = platform##_hpd_enable_detection, \ +} + +HPD_FUNCS(i915); +HPD_FUNCS(xelpdp); +HPD_FUNCS(dg1); +HPD_FUNCS(gen11); +HPD_FUNCS(bxt); +HPD_FUNCS(icp); +HPD_FUNCS(spt); +HPD_FUNCS(ilk); +#undef HPD_FUNCS + +void intel_hpd_enable_detection(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + if (i915->display.funcs.hotplug) + i915->display.funcs.hotplug->hpd_enable_detection(encoder); +} + +void intel_hpd_irq_setup(struct drm_i915_private *i915) +{ + if (i915->display_irqs_enabled && i915->display.funcs.hotplug) + i915->display.funcs.hotplug->hpd_irq_setup(i915); +} + +void intel_hotplug_irq_init(struct drm_i915_private *i915) +{ + intel_hpd_init_pins(i915); + + intel_hpd_init_early(i915); + + if (HAS_GMCH(i915)) { + if (I915_HAS_HOTPLUG(i915)) + i915->display.funcs.hotplug = &i915_hpd_funcs; + } else { + if (HAS_PCH_DG2(i915)) + i915->display.funcs.hotplug = &icp_hpd_funcs; + else if (HAS_PCH_DG1(i915)) + i915->display.funcs.hotplug = &dg1_hpd_funcs; + else if (DISPLAY_VER(i915) >= 14) + i915->display.funcs.hotplug = &xelpdp_hpd_funcs; + else if (DISPLAY_VER(i915) >= 11) + i915->display.funcs.hotplug = &gen11_hpd_funcs; + else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + i915->display.funcs.hotplug = &bxt_hpd_funcs; + else if 
(INTEL_PCH_TYPE(i915) >= PCH_ICP) + i915->display.funcs.hotplug = &icp_hpd_funcs; + else if (INTEL_PCH_TYPE(i915) >= PCH_SPT) + i915->display.funcs.hotplug = &spt_hpd_funcs; + else + i915->display.funcs.hotplug = &ilk_hpd_funcs; + } +} diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h new file mode 100644 index 000000000000..e4db752df096 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_HOTPLUG_IRQ_H__ +#define __INTEL_HOTPLUG_IRQ_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_encoder; + +u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915); + +void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status); +void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); +void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); +void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir); +void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); +void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir); +void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir); +void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir); + +void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915, + u32 mask, u32 bits); +void i915_hotplug_interrupt_update(struct drm_i915_private *i915, + u32 mask, u32 bits); + +void intel_hpd_enable_detection(struct intel_encoder *encoder); +void intel_hpd_irq_setup(struct drm_i915_private *i915); + +void intel_hotplug_irq_init(struct drm_i915_private *i915); + +#endif /* __INTEL_HOTPLUG_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c index c518efebdf77..a92d008d4e6e 100644 --- a/drivers/gpu/drm/i915/display/intel_hti.c +++ b/drivers/gpu/drm/i915/display/intel_hti.c @@ -15,7 +15,7 @@ void intel_hti_init(struct drm_i915_private *i915) * If the platform has HTI, we need to find out whether it has reserved * any display resources before we create our display outputs. 
*/ - if (INTEL_INFO(i915)->display.has_hti) + if (DISPLAY_INFO(i915)->has_hti) i915->display.hti.state = intel_de_read(i915, HDPORT_STATE); } diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c new file mode 100644 index 000000000000..d5a0aecf3e8f --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_load_detect.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_uapi.h> + +#include "i915_drv.h" +#include "intel_atomic.h" +#include "intel_crtc.h" +#include "intel_display_types.h" +#include "intel_load_detect.h" + +/* VESA 640x480x72Hz mode to set on the pipe */ +static const struct drm_display_mode load_detect_mode = { + DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, + 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), +}; + +static int intel_modeset_disable_planes(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + struct drm_plane *plane; + struct drm_plane_state *plane_state; + int ret, i; + + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + return ret; + + for_each_new_plane_in_state(state, plane, plane_state, i) { + if (plane_state->crtc != crtc) + continue; + + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret) + return ret; + + drm_atomic_set_fb_for_plane(plane_state, NULL); + } + + return 0; +} + +struct drm_atomic_state * +intel_load_detect_get_pipe(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx) +{ + struct intel_encoder *encoder = + intel_attached_encoder(to_intel_connector(connector)); + struct intel_crtc *possible_crtc; + struct intel_crtc *crtc = NULL; + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_mode_config *config = &dev->mode_config; + struct drm_atomic_state *state = NULL, *restore_state = NULL; + struct drm_connector_state *connector_state; + struct intel_crtc_state *crtc_state; + int ret; + + drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, connector->name, + encoder->base.base.id, encoder->base.name); + + drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); + + /* + * Algorithm gets a little messy: + * + * - if the connector already has an assigned crtc, use it (but make + * sure it's on first) + * + * - try to find the first unused crtc that can drive this connector, + * and use that if we find one + */ + + /* See if we already have a CRTC for this connector */ + if (connector->state->crtc) { + crtc = to_intel_crtc(connector->state->crtc); + + ret = drm_modeset_lock(&crtc->base.mutex, ctx); + if (ret) + goto fail; + + /* Make sure the crtc and connector are running */ + goto found; + } + + /* Find an unused one (if possible) */ + for_each_intel_crtc(dev, possible_crtc) { + if (!(encoder->base.possible_crtcs & + drm_crtc_mask(&possible_crtc->base))) + continue; + + ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx); + if (ret) + goto fail; + + if (possible_crtc->base.state->enable) { + drm_modeset_unlock(&possible_crtc->base.mutex); + continue; + } + + crtc = possible_crtc; + break; + } + + /* + * If we didn't find an unused CRTC, don't use any. 
+ */ + if (!crtc) { + drm_dbg_kms(&dev_priv->drm, + "no pipe available for load-detect\n"); + ret = -ENODEV; + goto fail; + } + +found: + state = drm_atomic_state_alloc(dev); + restore_state = drm_atomic_state_alloc(dev); + if (!state || !restore_state) { + ret = -ENOMEM; + goto fail; + } + + state->acquire_ctx = ctx; + to_intel_atomic_state(state)->internal = true; + + restore_state->acquire_ctx = ctx; + to_intel_atomic_state(restore_state)->internal = true; + + connector_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(connector_state)) { + ret = PTR_ERR(connector_state); + goto fail; + } + + ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base); + if (ret) + goto fail; + + crtc_state = intel_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto fail; + } + + crtc_state->uapi.active = true; + + ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, + &load_detect_mode); + if (ret) + goto fail; + + ret = intel_modeset_disable_planes(state, &crtc->base); + if (ret) + goto fail; + + ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); + if (!ret) + ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base)); + if (!ret) + ret = drm_atomic_add_affected_planes(restore_state, &crtc->base); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "Failed to create a copy of old state to restore: %i\n", + ret); + goto fail; + } + + ret = drm_atomic_commit(state); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "failed to set mode on load-detect pipe\n"); + goto fail; + } + + drm_atomic_state_put(state); + + /* let the connector get through one full cycle before testing */ + intel_crtc_wait_for_next_vblank(crtc); + + return restore_state; + +fail: + if (state) { + drm_atomic_state_put(state); + state = NULL; + } + if (restore_state) { + drm_atomic_state_put(restore_state); + restore_state = NULL; + } + + if (ret == -EDEADLK) + return ERR_PTR(ret); + + return NULL; +} + +void intel_load_detect_release_pipe(struct drm_connector *connector, + struct drm_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx) +{ + struct intel_encoder *intel_encoder = + intel_attached_encoder(to_intel_connector(connector)); + struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); + struct drm_encoder *encoder = &intel_encoder->base; + int ret; + + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, connector->name, + encoder->base.id, encoder->name); + + if (IS_ERR_OR_NULL(state)) + return; + + ret = drm_atomic_helper_commit_duplicated_state(state, ctx); + if (ret) + drm_dbg_kms(&i915->drm, + "Couldn't release load detect pipe: %i\n", ret); + drm_atomic_state_put(state); +} diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.h b/drivers/gpu/drm/i915/display/intel_load_detect.h new file mode 100644 index 000000000000..aed51901b9ba --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_load_detect.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_LOAD_DETECT_H__ +#define __INTEL_LOAD_DETECT_H__ + +struct drm_atomic_state; +struct drm_connector; +struct drm_modeset_acquire_ctx; + +struct drm_atomic_state * +intel_load_detect_get_pipe(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx); +void intel_load_detect_release_pipe(struct drm_connector *connector, + struct drm_atomic_state *old, + struct drm_modeset_acquire_ctx *ctx); + +#endif /* __INTEL_LOAD_DETECT_H__ 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 0de44b3631cd..3ace56979b70 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -150,7 +150,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, if (DISPLAY_VER(dev_priv) < 4) { tmp = intel_de_read(dev_priv, PFIT_CONTROL); - crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; + crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE; } crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock; @@ -437,6 +437,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder, crtc_state->pipe_bpp = lvds_bpp; } + crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; /* @@ -943,17 +944,8 @@ void intel_lvds_init(struct drm_i915_private *i915) */ mutex_lock(&i915->drm.mode_config.mutex); if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { - const struct edid *edid; - - /* FIXME: Make drm_get_edid_switcheroo() return drm_edid */ - edid = drm_get_edid_switcheroo(&connector->base, - intel_gmbus_get_adapter(i915, pin)); - if (edid) { - drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH); - kfree(edid); - } else { - drm_edid = NULL; - } + drm_edid = drm_edid_read_switcheroo(&connector->base, + intel_gmbus_get_adapter(i915, pin)); } else { drm_edid = drm_edid_read_ddc(&connector->base, intel_gmbus_get_adapter(i915, pin)); diff --git a/drivers/gpu/drm/i915/display/intel_modeset_lock.c b/drivers/gpu/drm/i915/display/intel_modeset_lock.c new file mode 100644 index 000000000000..8fb6fd849a75 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_modeset_lock.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_modeset_lock.h> + +#include "intel_display_types.h" +#include "intel_modeset_lock.h" + +void _intel_modeset_lock_begin(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + unsigned int flags, int *ret) +{ + drm_modeset_acquire_init(ctx, flags); + + if (state) + state->base.acquire_ctx = ctx; + + *ret = -EDEADLK; +} + +bool _intel_modeset_lock_loop(int *ret) +{ + if (*ret == -EDEADLK) { + *ret = 0; + return true; + } + + return false; +} + +void _intel_modeset_lock_end(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + int *ret) +{ + if (*ret == -EDEADLK) { + if (state) + drm_atomic_state_clear(&state->base); + + *ret = drm_modeset_backoff(ctx); + if (*ret == 0) { + *ret = -EDEADLK; + return; + } + } + + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); +} diff --git a/drivers/gpu/drm/i915/display/intel_modeset_lock.h b/drivers/gpu/drm/i915/display/intel_modeset_lock.h new file mode 100644 index 000000000000..edb5099bcd99 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_modeset_lock.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_MODESET_LOCK_H__ +#define __INTEL_MODESET_LOCK_H__ + +#include <linux/types.h> + +struct drm_modeset_acquire_ctx; +struct intel_atomic_state; + +void _intel_modeset_lock_begin(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + unsigned int flags, + int *ret); +bool _intel_modeset_lock_loop(int *ret); +void _intel_modeset_lock_end(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + int *ret); + +/* + * Note that one must always 
use "continue" rather than + * "break" or "return" to handle errors within the + * intel_modeset_lock_ctx_retry() block. + */ +#define intel_modeset_lock_ctx_retry(ctx, state, flags, ret) \ + for (_intel_modeset_lock_begin((ctx), (state), (flags), &(ret)); \ + _intel_modeset_lock_loop(&(ret)); \ + _intel_modeset_lock_end((ctx), (state), &(ret))) + +#endif /* __INTEL_MODESET_LOCK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 134b943f1953..5ff99ca7f1de 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -26,28 +26,21 @@ #include "intel_fifo_underrun.h" #include "intel_modeset_setup.h" #include "intel_pch_display.h" +#include "intel_tc.h" #include "intel_vblank.h" #include "intel_wm.h" #include "skl_watermark.h" -static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, - struct drm_modeset_acquire_ctx *ctx) +static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) { - struct intel_encoder *encoder; struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_bw_state *bw_state = - to_intel_bw_state(i915->display.bw.obj.state); - struct intel_cdclk_state *cdclk_state = - to_intel_cdclk_state(i915->display.cdclk.obj.state); - struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; struct drm_atomic_state *state; - struct intel_crtc_state *temp_crtc_state; + struct intel_crtc *temp_crtc; enum pipe pipe = crtc->pipe; - int ret; if (!crtc_state->hw.active) return; @@ -69,12 +62,20 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, } state->acquire_ctx = ctx; + to_intel_atomic_state(state)->internal = true; /* Everything's already locked, -EDEADLK can't happen. 
*/ - temp_crtc_state = intel_atomic_get_crtc_state(state, crtc); - ret = drm_atomic_add_affected_connectors(state, &crtc->base); + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, + BIT(pipe) | + intel_crtc_bigjoiner_slave_pipes(crtc_state)) { + struct intel_crtc_state *temp_crtc_state = + intel_atomic_get_crtc_state(state, temp_crtc); + int ret; - drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret); + ret = drm_atomic_add_affected_connectors(state, &temp_crtc->base); + + drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret); + } i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc); @@ -87,16 +88,78 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, crtc->active = false; crtc->base.enabled = false; - drm_WARN_ON(&i915->drm, - drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0); - crtc_state->uapi.active = false; - crtc_state->uapi.connector_mask = 0; - crtc_state->uapi.encoder_mask = 0; - intel_crtc_free_hw_state(crtc_state); - memset(&crtc_state->hw, 0, sizeof(crtc_state->hw)); + if (crtc_state->shared_dpll) + intel_unreference_shared_dpll_crtc(crtc, + crtc_state->shared_dpll, + &crtc_state->shared_dpll->state); +} + +static void set_encoder_for_connector(struct intel_connector *connector, + struct intel_encoder *encoder) +{ + struct drm_connector_state *conn_state = connector->base.state; + + if (conn_state->crtc) + drm_connector_put(&connector->base); + + if (encoder) { + conn_state->best_encoder = &encoder->base; + conn_state->crtc = encoder->base.crtc; + drm_connector_get(&connector->base); + } else { + conn_state->best_encoder = NULL; + conn_state->crtc = NULL; + } +} + +static void reset_encoder_connector_state(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + if (connector->base.encoder != &encoder->base) + continue; + + set_encoder_for_connector(connector, NULL); + + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; + } + drm_connector_list_iter_end(&conn_iter); +} + +static void reset_crtc_encoder_state(struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_encoder *encoder; - for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) + for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) { + reset_encoder_connector_state(encoder); encoder->base.crtc = NULL; + } +} + +static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_bw_state *bw_state = + to_intel_bw_state(i915->display.bw.obj.state); + struct intel_cdclk_state *cdclk_state = + to_intel_cdclk_state(i915->display.cdclk.obj.state); + struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->display.dbuf.obj.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + enum pipe pipe = crtc->pipe; + + __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); + intel_crtc_free_hw_state(crtc_state); + intel_crtc_state_reset(crtc_state, crtc); + + reset_crtc_encoder_state(crtc); intel_fbc_disable(crtc); intel_update_watermarks(i915); @@ -113,6 +176,118 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, bw_state->num_active_planes[pipe] = 0; } +/* + * Return all the pipes using a transcoder in 
@transcoder_mask. + * For bigjoiner configs return only the bigjoiner master. + */ +static u8 get_transcoder_pipes(struct drm_i915_private *i915, + u8 transcoder_mask) +{ + struct intel_crtc *temp_crtc; + u8 pipes = 0; + + for_each_intel_crtc(&i915->drm, temp_crtc) { + struct intel_crtc_state *temp_crtc_state = + to_intel_crtc_state(temp_crtc->base.state); + + if (temp_crtc_state->cpu_transcoder == INVALID_TRANSCODER) + continue; + + if (intel_crtc_is_bigjoiner_slave(temp_crtc_state)) + continue; + + if (transcoder_mask & BIT(temp_crtc_state->cpu_transcoder)) + pipes |= BIT(temp_crtc->pipe); + } + + return pipes; +} + +/* + * Return the port sync master and slave pipes linked to @crtc. + * For bigjoiner configs return only the bigjoiner master pipes. + */ +static void get_portsync_pipes(struct intel_crtc *crtc, + u8 *master_pipe_mask, u8 *slave_pipes_mask) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_crtc *master_crtc; + struct intel_crtc_state *master_crtc_state; + enum transcoder master_transcoder; + + if (!is_trans_port_sync_mode(crtc_state)) { + *master_pipe_mask = BIT(crtc->pipe); + *slave_pipes_mask = 0; + + return; + } + + if (is_trans_port_sync_master(crtc_state)) + master_transcoder = crtc_state->cpu_transcoder; + else + master_transcoder = crtc_state->master_transcoder; + + *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder)); + drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask)); + + master_crtc = intel_crtc_for_pipe(i915, ffs(*master_pipe_mask) - 1); + master_crtc_state = to_intel_crtc_state(master_crtc->base.state); + *slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask); +} + +static u8 get_bigjoiner_slave_pipes(struct drm_i915_private *i915, u8 master_pipes_mask) +{ + struct intel_crtc *master_crtc; + u8 pipes = 0; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, master_crtc, master_pipes_mask) { + struct intel_crtc_state *master_crtc_state = + to_intel_crtc_state(master_crtc->base.state); + + pipes |= intel_crtc_bigjoiner_slave_pipes(master_crtc_state); + } + + return pipes; +} + +static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + u8 portsync_master_mask; + u8 portsync_slaves_mask; + u8 bigjoiner_slaves_mask; + struct intel_crtc *temp_crtc; + + /* TODO: Add support for MST */ + get_portsync_pipes(crtc, &portsync_master_mask, &portsync_slaves_mask); + bigjoiner_slaves_mask = get_bigjoiner_slave_pipes(i915, + portsync_master_mask | + portsync_slaves_mask); + + drm_WARN_ON(&i915->drm, + portsync_master_mask & portsync_slaves_mask || + portsync_master_mask & bigjoiner_slaves_mask || + portsync_slaves_mask & bigjoiner_slaves_mask); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, bigjoiner_slaves_mask) + intel_crtc_disable_noatomic_begin(temp_crtc, ctx); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_slaves_mask) + intel_crtc_disable_noatomic_begin(temp_crtc, ctx); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_master_mask) + intel_crtc_disable_noatomic_begin(temp_crtc, ctx); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, + bigjoiner_slaves_mask | + portsync_slaves_mask | + portsync_master_mask) + intel_crtc_disable_noatomic_complete(temp_crtc); +} + static void intel_modeset_update_connector_atomic_state(struct 
drm_i915_private *i915) { struct intel_connector *connector; @@ -124,8 +299,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private struct intel_encoder *encoder = to_intel_encoder(connector->base.encoder); - if (conn_state->crtc) - drm_connector_put(&connector->base); + set_encoder_for_connector(connector, encoder); if (encoder) { struct intel_crtc *crtc = @@ -133,14 +307,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - conn_state->best_encoder = &encoder->base; - conn_state->crtc = &crtc->base; conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3; - - drm_connector_get(&connector->base); - } else { - conn_state->best_encoder = NULL; - conn_state->crtc = NULL; } } drm_connector_list_iter_end(&conn_iter); @@ -213,6 +380,21 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc) return false; } +static bool intel_crtc_needs_link_reset(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct intel_encoder *encoder; + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (dig_port && intel_tc_port_link_needs_reset(dig_port)) + return true; + } + + return false; +} + static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); @@ -255,11 +437,12 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state !HAS_GMCH(i915)); } -static void intel_sanitize_crtc(struct intel_crtc *crtc, +static bool intel_sanitize_crtc(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + bool needs_link_reset; if (crtc_state->hw.active) { struct intel_plane *plane; @@ -279,13 +462,67 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, intel_color_commit_arm(crtc_state); } + if (!crtc_state->hw.active || + intel_crtc_is_bigjoiner_slave(crtc_state)) + return false; + + needs_link_reset = intel_crtc_needs_link_reset(crtc); + /* * Adjust the state of the output pipe according to whether we have * active connectors/encoders. */ - if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) && - !intel_crtc_is_bigjoiner_slave(crtc_state)) - intel_crtc_disable_noatomic(crtc, ctx); + if (!needs_link_reset && intel_crtc_has_encoders(crtc)) + return false; + + intel_crtc_disable_noatomic(crtc, ctx); + + /* + * The HPD state on other active/disconnected TC ports may be stuck in + * the connected state until this port is disabled and a ~10ms delay has + * passed, wait here for that so that sanitizing other CRTCs will see the + * up-to-date HPD state. + */ + if (needs_link_reset) + msleep(20); + + return true; +} + +static void intel_sanitize_all_crtcs(struct drm_i915_private *i915, + struct drm_modeset_acquire_ctx *ctx) +{ + struct intel_crtc *crtc; + u32 crtcs_forced_off = 0; + + /* + * An active and disconnected TypeC port prevents the HPD live state + * to get updated on other active/disconnected TypeC ports, so after + * a port gets disabled the CRTCs using other TypeC ports must be + * rechecked wrt. their link status. 
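+ * + * Keep iterating until a full pass over all the CRTCs forces off no new + * ones, i.e. until the set of forced-off CRTCs reaches a fixed point.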
+ */ + for (;;) { + u32 old_mask = crtcs_forced_off; + + for_each_intel_crtc(&i915->drm, crtc) { + u32 crtc_mask = drm_crtc_mask(&crtc->base); + + if (crtcs_forced_off & crtc_mask) + continue; + + if (intel_sanitize_crtc(crtc, ctx)) + crtcs_forced_off |= crtc_mask; + } + if (crtcs_forced_off == old_mask) + break; + } + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state"); + } } static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) @@ -559,7 +796,8 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) */ crtc_state->inherited = true; - intel_crtc_update_active_timings(crtc_state); + intel_crtc_update_active_timings(crtc_state, + crtc_state->vrr.enable); intel_crtc_copy_hw_to_uapi_state(crtc_state); } @@ -698,16 +936,14 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, for_each_intel_encoder(&i915->drm, encoder) intel_sanitize_encoder(encoder); - for_each_intel_crtc(&i915->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - intel_sanitize_crtc(crtc, ctx); - intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state"); - } - + /* + * Sanitizing CRTCs needs their connector atomic state to be + * up-to-date, so ensure that already here. + */ intel_modeset_update_connector_atomic_state(i915); + intel_sanitize_all_crtcs(i915, ctx); + intel_dpll_sanitize_state(i915); intel_wm_get_hw_state(i915); diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index 842d70f0dfd2..138144a65a45 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -11,6 +11,7 @@ #include "intel_atomic.h" #include "intel_crtc.h" #include "intel_crtc_state_dump.h" +#include "intel_cx0_phy.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_fdi.h" @@ -34,27 +35,28 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, if (connector->get_hw_state(connector)) { struct intel_encoder *encoder = intel_attached_encoder(connector); - I915_STATE_WARN(!crtc_state, + I915_STATE_WARN(i915, !crtc_state, "connector enabled without attached crtc\n"); if (!crtc_state) return; - I915_STATE_WARN(!crtc_state->hw.active, + I915_STATE_WARN(i915, !crtc_state->hw.active, "connector is active, but attached crtc isn't\n"); if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) return; - I915_STATE_WARN(conn_state->best_encoder != &encoder->base, + I915_STATE_WARN(i915, + conn_state->best_encoder != &encoder->base, "atomic encoder doesn't match attached encoder\n"); - I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, + I915_STATE_WARN(i915, conn_state->crtc != encoder->base.crtc, "attached encoder crtc differs from connector crtc\n"); } else { - I915_STATE_WARN(crtc_state && crtc_state->hw.active, + I915_STATE_WARN(i915, crtc_state && crtc_state->hw.active, "attached crtc is active, but connector isn't\n"); - I915_STATE_WARN(!crtc_state && conn_state->best_encoder, + I915_STATE_WARN(i915, !crtc_state && conn_state->best_encoder, "best encoder set without crtc!\n"); } } @@ -79,7 +81,7 @@ verify_connector_state(struct intel_atomic_state *state, intel_connector_verify_state(crtc_state, new_conn_state); - I915_STATE_WARN(new_conn_state->best_encoder != encoder, + I915_STATE_WARN(to_i915(connector->dev), new_conn_state->best_encoder 
!= encoder, "connector's atomic encoder doesn't match legacy encoder\n"); } } @@ -130,15 +132,15 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat found = true; enabled = true; - I915_STATE_WARN(new_conn_state->crtc != - encoder->base.crtc, + I915_STATE_WARN(dev_priv, + new_conn_state->crtc != encoder->base.crtc, "connector's crtc doesn't match encoder crtc\n"); } if (!found) continue; - I915_STATE_WARN(!!encoder->base.crtc != enabled, + I915_STATE_WARN(dev_priv, !!encoder->base.crtc != enabled, "encoder's enabled state mismatch (expected %i, found %i)\n", !!encoder->base.crtc, enabled); @@ -146,7 +148,7 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat bool active; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(active, + I915_STATE_WARN(dev_priv, active, "encoder detached but still enabled on pipe %c.\n", pipe_name(pipe)); } @@ -181,11 +183,12 @@ verify_crtc_state(struct intel_crtc *crtc, if (IS_I830(dev_priv) && pipe_config->hw.active) pipe_config->hw.active = new_crtc_state->hw.active; - I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active, + I915_STATE_WARN(dev_priv, + new_crtc_state->hw.active != pipe_config->hw.active, "crtc active state doesn't match with hw state (expected %i, found %i)\n", new_crtc_state->hw.active, pipe_config->hw.active); - I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, + I915_STATE_WARN(dev_priv, crtc->active != new_crtc_state->hw.active, "transitional active state does not match atomic hw state (expected %i, found %i)\n", new_crtc_state->hw.active, crtc->active); @@ -196,12 +199,12 @@ verify_crtc_state(struct intel_crtc *crtc, bool active; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(active != new_crtc_state->hw.active, + I915_STATE_WARN(dev_priv, active != new_crtc_state->hw.active, "[ENCODER:%i] active %i with crtc active %i\n", encoder->base.base.id, active, new_crtc_state->hw.active); - I915_STATE_WARN(active && master_crtc->pipe != pipe, + I915_STATE_WARN(dev_priv, active && master_crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); @@ -216,7 +219,7 @@ verify_crtc_state(struct intel_crtc *crtc, if (!intel_pipe_config_compare(new_crtc_state, pipe_config, false)) { - I915_STATE_WARN(1, "pipe state doesn't match!\n"); + I915_STATE_WARN(dev_priv, 1, "pipe state doesn't match!\n"); intel_crtc_state_dump(pipe_config, NULL, "hw state"); intel_crtc_state_dump(new_crtc_state, NULL, "sw state"); } @@ -236,6 +239,7 @@ void intel_modeset_verify_crtc(struct intel_crtc *crtc, verify_crtc_state(crtc, old_crtc_state, new_crtc_state); intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state); intel_mpllb_state_verify(state, new_crtc_state); + intel_c10pll_state_verify(state, new_crtc_state); } void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index c12bdca8da9b..d6fe2bbabe55 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -935,21 +935,25 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL); u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 
*/ if (DISPLAY_VER(dev_priv) >= 4) { + u32 tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS); + /* on i965 use the PGM reg to read out the autoscaler values */ - ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; + ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK_965, tmp); } else { - if (pfit_control & VERT_AUTO_SCALE) - ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS); + u32 tmp; + + if (intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_VERT_AUTO_SCALE) + tmp = intel_de_read(dev_priv, PFIT_AUTO_RATIOS); else - ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS); - ratio >>= PFIT_VERT_SCALE_SHIFT; + tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS); + + ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK, tmp); } overlay->pfit_vscale_ratio = ratio; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index ce2a34a25211..9232a305b1e6 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -42,6 +42,7 @@ #include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_quirks.h" +#include "intel_vrr.h" bool intel_panel_use_ssc(struct drm_i915_private *i915) { @@ -58,6 +59,38 @@ intel_panel_preferred_fixed_mode(struct intel_connector *connector) struct drm_display_mode, head); } +static bool is_in_vrr_range(struct intel_connector *connector, int vrefresh) +{ + const struct drm_display_info *info = &connector->base.display_info; + + return intel_vrr_is_capable(connector) && + vrefresh >= info->monitor_range.min_vfreq && + vrefresh <= info->monitor_range.max_vfreq; +} + +static bool is_best_fixed_mode(struct intel_connector *connector, + int vrefresh, int fixed_mode_vrefresh, + const struct drm_display_mode *best_mode) +{ + /* we want to always return something */ + if (!best_mode) + return true; + + /* + * With VRR always pick a mode with equal/higher than requested + * vrefresh, which we can then reduce to match the requested + * vrefresh by extending the vblank length. + */ + if (is_in_vrr_range(connector, vrefresh) && + is_in_vrr_range(connector, fixed_mode_vrefresh) && + fixed_mode_vrefresh < vrefresh) + return false; + + /* pick the fixed_mode that is closest in terms of vrefresh */ + return abs(fixed_mode_vrefresh - vrefresh) < + abs(drm_mode_vrefresh(best_mode) - vrefresh); +} + const struct drm_display_mode * intel_panel_fixed_mode(struct intel_connector *connector, const struct drm_display_mode *mode) @@ -65,11 +98,11 @@ intel_panel_fixed_mode(struct intel_connector *connector, const struct drm_display_mode *fixed_mode, *best_mode = NULL; int vrefresh = drm_mode_vrefresh(mode); - /* pick the fixed_mode that is closest in terms of vrefresh */ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { - if (!best_mode || - abs(drm_mode_vrefresh(fixed_mode) - vrefresh) < - abs(drm_mode_vrefresh(best_mode) - vrefresh)) + int fixed_mode_vrefresh = drm_mode_vrefresh(fixed_mode); + + if (is_best_fixed_mode(connector, vrefresh, + fixed_mode_vrefresh, best_mode)) best_mode = fixed_mode; } @@ -178,27 +211,46 @@ int intel_panel_compute_config(struct intel_connector *connector, { const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode); + int vrefresh, fixed_mode_vrefresh; + bool is_vrr; if (!fixed_mode) return 0; + vrefresh = drm_mode_vrefresh(adjusted_mode); + fixed_mode_vrefresh = drm_mode_vrefresh(fixed_mode); + /* - * We don't want to lie too much to the user about the refresh - * rate they're going to get. 
But we have to allow a bit of latitude - * for Xorg since it likes to automagically cook up modes with slightly - * off refresh rates. + * Assume that we shouldn't muck about with the + * timings if they don't land in the VRR range. */ - if (abs(drm_mode_vrefresh(adjusted_mode) - drm_mode_vrefresh(fixed_mode)) > 1) { - drm_dbg_kms(connector->base.dev, - "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n", - connector->base.base.id, connector->base.name, - drm_mode_vrefresh(adjusted_mode), drm_mode_vrefresh(fixed_mode)); + is_vrr = is_in_vrr_range(connector, vrefresh) && + is_in_vrr_range(connector, fixed_mode_vrefresh); - return -EINVAL; + if (!is_vrr) { + /* + * We don't want to lie too much to the user about the refresh + * rate they're going to get. But we have to allow a bit of latitude + * for Xorg since it likes to automagically cook up modes with slightly + * off refresh rates. + */ + if (abs(vrefresh - fixed_mode_vrefresh) > 1) { + drm_dbg_kms(connector->base.dev, + "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n", + connector->base.base.id, connector->base.name, + vrefresh, fixed_mode_vrefresh); + + return -EINVAL; + } } drm_mode_copy(adjusted_mode, fixed_mode); + if (is_vrr && fixed_mode_vrefresh != vrefresh) + adjusted_mode->vtotal = + DIV_ROUND_CLOSEST(adjusted_mode->clock * 1000, + adjusted_mode->htotal * vrefresh); + drm_mode_set_crtcinfo(adjusted_mode, 0); return 0; @@ -512,11 +564,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, bits = panel_fitter_scaling(pipe_src_h, adjusted_mode->crtc_vdisplay); - *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | - bits << PFIT_VERT_SCALE_SHIFT); + *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | + PFIT_VERT_SCALE(bits)); *pfit_control |= (PFIT_ENABLE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); } } else if (scaled_width < scaled_height) { /* letter */ centre_vertically(adjusted_mode, @@ -527,18 +579,19 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, bits = panel_fitter_scaling(pipe_src_w, adjusted_mode->crtc_hdisplay); - *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | - bits << PFIT_VERT_SCALE_SHIFT); + *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | + PFIT_VERT_SCALE(bits)); *pfit_control |= (PFIT_ENABLE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); } } else { /* Aspects match, Let hw scale both directions */ *pfit_control |= (PFIT_ENABLE | - VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); + PFIT_VERT_AUTO_SCALE | + PFIT_HORIZ_AUTO_SCALE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); } } @@ -586,10 +639,10 @@ static int gmch_panel_fitting(struct intel_crtc_state *crtc_state, if (DISPLAY_VER(dev_priv) >= 4) pfit_control |= PFIT_SCALING_AUTO; else - pfit_control |= (VERT_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_AUTO_SCALE | - HORIZ_INTERP_BILINEAR); + pfit_control |= (PFIT_VERT_AUTO_SCALE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_AUTO_SCALE | + PFIT_HORIZ_INTERP_BILINEAR); } break; default: @@ -610,7 +663,7 @@ out: /* Make sure pre-965 set dither correctly for 18bpp panels. 
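The VRR handling added to intel_panel.c above works in two steps: is_best_fixed_mode() prefers a fixed mode whose vrefresh is at or above the requested one when both rates fall in the panel's VRR range, and intel_panel_compute_config() then reaches the requested rate by stretching the vblank, recomputing vtotal with the DIV_ROUND_CLOSEST() expression shown above. A standalone worked example of that arithmetic, using hypothetical panel timings (the numbers are illustrative only):

	#include <stdio.h>

	/* simplified DIV_ROUND_CLOSEST(), positive values only */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	int main(void)
	{
		/* hypothetical fixed mode: pixel clock in kHz, htotal/vtotal in pixels/lines */
		int clock = 311680, htotal = 3040, vtotal = 1708;	/* ~60 Hz */
		int vrefresh = 48;					/* requested rate */

		/* same formula as the intel_panel_compute_config() change above */
		int new_vtotal = DIV_ROUND_CLOSEST(clock * 1000, htotal * vrefresh);

		printf("vtotal %d -> %d (%.2f Hz -> %.2f Hz)\n",
		       vtotal, new_vtotal,
		       clock * 1000.0 / (htotal * vtotal),
		       clock * 1000.0 / (htotal * new_vtotal));
		return 0;
	}
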
*/ if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18) - pfit_control |= PANEL_8TO6_DITHER_ENABLE; + pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE; crtc_state->gmch_pfit.control = pfit_control; crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 2411fe4dee8b..866786e6b32f 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -43,11 +43,12 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); - I915_STATE_WARN(state && port_pipe == pipe, + I915_STATE_WARN(dev_priv, state && port_pipe == pipe, "PCH DP %c enabled on transcoder %c, should be disabled\n", port_name(port), pipe_name(pipe)); - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + I915_STATE_WARN(dev_priv, + HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, "IBX PCH DP %c still using transcoder B\n", port_name(port)); } @@ -61,11 +62,12 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); - I915_STATE_WARN(state && port_pipe == pipe, + I915_STATE_WARN(dev_priv, state && port_pipe == pipe, "PCH HDMI %c enabled on transcoder %c, should be disabled\n", port_name(port), pipe_name(pipe)); - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + I915_STATE_WARN(dev_priv, + HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, "IBX PCH HDMI %c still using transcoder B\n", port_name(port)); } @@ -79,13 +81,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); - I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && - port_pipe == pipe, + I915_STATE_WARN(dev_priv, + intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && port_pipe == pipe, "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); - I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && - port_pipe == pipe, + I915_STATE_WARN(dev_priv, + intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe, "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); @@ -103,7 +105,7 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); enabled = !!(val & TRANS_ENABLE); - I915_STATE_WARN(enabled, + I915_STATE_WARN(dev_priv, enabled, "transcoder assertion failed, should be off on pipe %c but is still active\n", pipe_name(pipe)); } diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 8d3ea8d7b737..5a468ed6e26c 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -293,6 +293,7 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) } state->acquire_ctx = &ctx; + to_intel_atomic_state(state)->internal = true; retry: pipe_config = intel_atomic_get_crtc_state(state, crtc); diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 7f9926672a6a..5e7ba594e7e7 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -787,7 +787,7 @@ void 
intel_pps_vdd_on(struct intel_dp *intel_dp) vdd = false; with_intel_pps_lock(intel_dp, wakeref) vdd = intel_pps_vdd_on_unlocked(intel_dp); - I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", + I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, pps_name(i915, &intel_dp->pps)); @@ -899,7 +899,8 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) if (!intel_dp_is_edp(intel_dp)) return; - I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s VDD not forced on", + I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd, + "[ENCODER:%d:%s] %s VDD not forced on", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, pps_name(dev_priv, &intel_dp->pps)); @@ -1653,12 +1654,9 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) */ pps_num = intel_num_pps(dev_priv); - for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { - u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx)); - - val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; - intel_de_write(dev_priv, PP_CONTROL(pps_idx), val); - } + for (pps_idx = 0; pps_idx < pps_num; pps_idx++) + intel_de_rmw(dev_priv, PP_CONTROL(pps_idx), + PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS); } void intel_pps_setup(struct drm_i915_private *i915) @@ -1724,7 +1722,7 @@ void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) locked = false; - I915_STATE_WARN(panel_pipe == pipe && locked, + I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked, "panel assertion failure, pipe %c regs locked\n", pipe_name(pipe)); } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 6badfff2b4a2..ea0389c5f656 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -85,6 +85,91 @@ * use page flips. */ +/* + * Description of PSR mask bits: + * + * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl): + * + * When unmasked (nearly) all display register writes (eg. even + * SWF) trigger a PSR exit. Some registers are excluded from this + * and they have a more specific mask (described below). On icl+ + * this bit no longer exists and is effectively always set. + * + * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+): + * + * When unmasked (nearly) all pipe/plane register writes + * trigger a PSR exit. Some plane registers are excluded from this + * and they have a more specific mask (described below). + * + * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+): + * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw): + * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw): + * + * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit. + * SPR_SURF/CURBASE are not included in this and instead are + * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or + * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw). + * + * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw): + * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw): + * + * When unmasked PSR is blocked as long as the sprite + * plane is enabled. skl+ with their universal planes no + * longer have a mask bit like this, and no plane being + * enabledb blocks PSR. + * + * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw): + * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw): + * + * When umasked CURPOS writes trigger a PSR exit. 
On skl+ + * this doesn't exist but CURPOS is included in the + * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask. + * + * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+): + * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw): + * + * When unmasked PSR is blocked as long as vblank and/or vsync + * interrupt is unmasked in IMR *and* enabled in IER. + * + * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+): + * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw): + * + * Selects whether PSR exit generates an extra vblank before + * the first frame is transmitted. Also note the opposite polarity + * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank, + * unmasked==do not generate the extra vblank). + * + * With DC states enabled the extra vblank happens after link training, + * with DC states disabled it happens immediately upon PSR exit trigger. + * No idea as of now why there is a difference. HSW/BDW (which don't + * even have DMC) always generate it after link training. Go figure. + * + * Unfortunately CHICKEN_TRANS itself seems to be double buffered + * and thus won't latch until the first vblank. So with DC states + * enabled the register effectively uses the reset value during DC5 + * exit+PSR exit sequence, and thus the bit does nothing until + * latched by the vblank that it was trying to prevent from being + * generated in the first place. So we should probably call this + * one a chicken/egg bit instead on skl+. + * + * In standby mode (as opposed to link-off) this makes no difference + * as the timing generator keeps running the whole time generating + * normal periodic vblanks. + * + * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw, + * and doing so makes the behaviour match the skl+ reset value. + * + * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw): + * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw): + * + * On BDW without this bit set no vblanks whatsoever are + * generated after PSR exit. On HSW this has no apparent effect. + * WaPsrDPRSUnmaskVBlankInSRD says to set this. + * + * The rest of the bits are more self-explanatory and/or + * irrelevant for normal operation.
+ */ + static bool psr_global_enabled(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; @@ -208,13 +293,13 @@ static void psr_event_print(struct drm_i915_private *i915, void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) { - enum transcoder cpu_transcoder = intel_dp->psr.transcoder; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; ktime_t time_ns = ktime_get(); i915_reg_t imr_reg; if (DISPLAY_VER(dev_priv) >= 12) - imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); + imr_reg = TRANS_PSR_IMR(cpu_transcoder); else imr_reg = EDP_PSR_IMR; @@ -232,13 +317,11 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) transcoder_name(cpu_transcoder)); if (DISPLAY_VER(dev_priv) >= 9) { - u32 val = intel_de_read(dev_priv, - PSR_EVENT(cpu_transcoder)); - bool psr2_enabled = intel_dp->psr.psr2_enabled; + u32 val; - intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder), - val); - psr_event_print(dev_priv, val, psr2_enabled); + val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0); + + psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled); } } @@ -419,7 +502,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) u32 val = 0; if (DISPLAY_VER(dev_priv) >= 11) - val |= EDP_PSR_TP4_TIME_0US; + val |= EDP_PSR_TP4_TIME_0us; if (dev_priv->params.psr_safest_params) { val |= EDP_PSR_TP1_TIME_2500us; @@ -448,9 +531,9 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) check_tp3_sel: if (intel_dp_source_supports_tps3(dev_priv) && drm_dp_tps3_supported(intel_dp->dpcd)) - val |= EDP_PSR_TP1_TP3_SEL; + val |= EDP_PSR_TP_TP1_TP3; else - val |= EDP_PSR_TP1_TP2_SEL; + val |= EDP_PSR_TP_TP1_TP2; return val; } @@ -476,12 +559,13 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) static void hsw_activate_psr1(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 max_sleep_time = 0x1f; u32 val = EDP_PSR_ENABLE; - val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT; + val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); - val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; + val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time); if (IS_HASWELL(dev_priv)) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; @@ -493,9 +577,8 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (DISPLAY_VER(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; - val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) & - EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); - intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val); + intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder), + ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); } static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) @@ -534,9 +617,10 @@ static int psr2_block_count(struct intel_dp *intel_dp) static void hsw_activate_psr2(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val = EDP_PSR2_ENABLE; - val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT; + val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv)) val |= EDP_SU_TRACK_ENABLE; @@ -570,15 +654,13 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see * comments 
bellow for more information */ - u32 tmp; + int tmp; tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; - tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT; - val |= tmp; + val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES); tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; - tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT; - val |= tmp; + val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES); } else if (DISPLAY_VER(dev_priv) >= 12) { val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); @@ -593,31 +675,30 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 tmp; - tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder)); + tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder)); drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { - intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0); + intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0); } /* * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. */ - intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0); + intel_de_write(dev_priv, EDP_PSR_CTL(cpu_transcoder), 0); - intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); + intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val); } static bool -transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) +transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) - return trans == TRANSCODER_A || trans == TRANSCODER_B; + return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B; else if (DISPLAY_VER(dev_priv) >= 12) - return trans == TRANSCODER_A; + return cpu_transcoder == TRANSCODER_A; else - return trans == TRANSCODER_EDP; + return cpu_transcoder == TRANSCODER_EDP; } static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) @@ -633,10 +714,11 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp, u32 idle_frames) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; - idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; - intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), - EDP_PSR2_IDLE_FRAME_MASK, idle_frames); + intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder), + EDP_PSR2_IDLE_FRAMES_MASK, + EDP_PSR2_IDLE_FRAMES(idle_frames)); } static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) @@ -1074,6 +1156,7 @@ void intel_psr_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_dp *intel_dp; u32 val; @@ -1100,15 +1183,14 @@ void intel_psr_get_config(struct intel_encoder *encoder, goto unlock; if (HAS_PSR2_SEL_FETCH(dev_priv)) { - val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder)); if (val & PSR2_MAN_TRK_CTL_ENABLE) pipe_config->enable_psr2_sel_fetch = true; } if (DISPLAY_VER(dev_priv) >= 12) { - val = intel_de_read(dev_priv, 
TRANS_EXITLINE(intel_dp->psr.transcoder)); - val &= EXITLINE_MASK; - pipe_config->dc3co_exitline = val; + val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder)); + pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val); } unlock: mutex_unlock(&intel_dp->psr.lock); @@ -1117,14 +1199,14 @@ unlock: static void intel_psr_activate(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum transcoder transcoder = intel_dp->psr.transcoder; + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; - if (transcoder_has_psr2(dev_priv, transcoder)) + if (transcoder_has_psr2(dev_priv, cpu_transcoder)) drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE); + intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE); + intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)) & EDP_PSR_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active); lockdep_assert_held(&intel_dp->psr.lock); @@ -1203,7 +1285,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (DISPLAY_VER(dev_priv) < 11) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; - intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder), + intel_de_write(dev_priv, EDP_PSR_DEBUG(cpu_transcoder), mask); psr_irq_control(intel_dp); @@ -1259,6 +1341,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, static bool psr_interrupt_error_check(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val; /* @@ -1270,8 +1353,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp) * to avoid any rendering problems. 
*/ if (DISPLAY_VER(dev_priv) >= 12) - val = intel_de_read(dev_priv, - TRANS_PSR_IIR(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, TRANS_PSR_IIR(cpu_transcoder)); else val = intel_de_read(dev_priv, EDP_PSR_IIR); val &= psr_irq_psr_error_bit_get(intel_dp); @@ -1327,17 +1409,16 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, static void intel_psr_exit(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val; if (!intel_dp->psr.active) { - if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) { - val = intel_de_read(dev_priv, - EDP_PSR2_CTL(intel_dp->psr.transcoder)); + if (transcoder_has_psr2(dev_priv, cpu_transcoder)) { + val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)); drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); } - val = intel_de_read(dev_priv, - EDP_PSR_CTL(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)); drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); return; @@ -1345,19 +1426,16 @@ static void intel_psr_exit(struct intel_dp *intel_dp) if (intel_dp->psr.psr2_enabled) { tgl_disallow_dc3co_on_psr2_exit(intel_dp); - val = intel_de_read(dev_priv, - EDP_PSR2_CTL(intel_dp->psr.transcoder)); + + val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder), + EDP_PSR2_ENABLE, 0); + drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); - val &= ~EDP_PSR2_ENABLE; - intel_de_write(dev_priv, - EDP_PSR2_CTL(intel_dp->psr.transcoder), val); } else { - val = intel_de_read(dev_priv, - EDP_PSR_CTL(intel_dp->psr.transcoder)); + val = intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder), + EDP_PSR_ENABLE, 0); + drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); - val &= ~EDP_PSR_ENABLE; - intel_de_write(dev_priv, - EDP_PSR_CTL(intel_dp->psr.transcoder), val); } intel_dp->psr.active = false; } @@ -1365,14 +1443,15 @@ static void intel_psr_exit(struct intel_dp *intel_dp) static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; i915_reg_t psr_status; u32 psr_status_mask; if (intel_dp->psr.psr2_enabled) { - psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder); + psr_status = EDP_PSR2_STATUS(cpu_transcoder); psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; } else { - psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder); + psr_status = EDP_PSR_STATUS(cpu_transcoder); psr_status_mask = EDP_PSR_STATUS_STATE_MASK; } @@ -1385,6 +1464,7 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) static void intel_psr_disable_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; enum phy phy = intel_port_to_phy(dev_priv, dp_to_dig_port(intel_dp)->base.port); @@ -1411,7 +1491,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) intel_de_rmw(dev_priv, - MTL_CLKGATE_DIS_TRANS(intel_dp->psr.transcoder), + MTL_CLKGATE_DIS_TRANS(cpu_transcoder), MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0); else if (IS_ALDERLAKE_P(dev_priv)) intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, @@ -1548,10 +1628,11 @@ static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv) static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 
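Several of the PSR register updates above are collapsed into intel_de_rmw(dev_priv, reg, clear, set) calls. The helper returns the register's old value, which is why intel_psr_exit() can still drm_WARN_ON() on the pre-clear ENABLE bit, and why hsw_activate_psr1() can pass clear = ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK to replace every writable bit while preserving the HW-owned context bit. A plain-C model of the shape of the helper, assuming the usual read/modify/write semantics (a sketch only; the real implementation goes through the uncore layer and may skip redundant writes):

	#include <stdint.h>

	/* clear the bits in 'clear', OR in 'set', return the value read before the write */
	uint32_t example_rmw(uint32_t *reg, uint32_t clear, uint32_t set)
	{
		uint32_t old = *reg;

		*reg = (old & ~clear) | set;

		return old;
	}
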
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), + PSR2_MAN_TRK_CTL(cpu_transcoder), man_trk_ctl_enable_bit_get(dev_priv) | man_trk_ctl_partial_frame_bit_get(dev_priv) | man_trk_ctl_single_full_frame_bit_get(dev_priv) | @@ -1651,6 +1732,7 @@ void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_encoder *encoder; if (!crtc_state->enable_psr2_sel_fetch) @@ -1666,7 +1748,7 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st break; } - intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder), + intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), crtc_state->psr2_man_track_ctl); } @@ -2045,6 +2127,7 @@ void intel_psr_post_plane_update(const struct intel_atomic_state *state) static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; /* * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough. @@ -2052,13 +2135,14 @@ static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. */ return intel_de_wait_for_clear(dev_priv, - EDP_PSR2_STATUS(intel_dp->psr.transcoder), + EDP_PSR2_STATUS(cpu_transcoder), EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50); } static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; /* * From bspec: Panel Self Refresh (BDW+) @@ -2067,7 +2151,7 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) * defensive enough to cover everything. 
*/ return intel_de_wait_for_clear(dev_priv, - EDP_PSR_STATUS(intel_dp->psr.transcoder), + EDP_PSR_STATUS(cpu_transcoder), EDP_PSR_STATUS_STATE_MASK, 50); } @@ -2109,6 +2193,7 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; i915_reg_t reg; u32 mask; int err; @@ -2117,10 +2202,10 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) return false; if (intel_dp->psr.psr2_enabled) { - reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder); + reg = EDP_PSR2_STATUS(cpu_transcoder); mask = EDP_PSR2_STATUS_STATE_MASK; } else { - reg = EDP_PSR_STATUS(intel_dp->psr.transcoder); + reg = EDP_PSR_STATUS(cpu_transcoder); mask = EDP_PSR_STATUS_STATE_MASK; } @@ -2149,10 +2234,11 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) return -ENOMEM; drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + state->acquire_ctx = &ctx; + to_intel_atomic_state(state)->internal = true; retry: - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(conn, &conn_iter) { struct drm_connector_state *conn_state; @@ -2281,6 +2367,7 @@ unlock: static void _psr_invalidate_handle(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 val; @@ -2294,7 +2381,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp) val = man_trk_ctl_enable_bit_get(dev_priv) | man_trk_ctl_partial_frame_bit_get(dev_priv) | man_trk_ctl_continuos_full_frame(dev_priv); - intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val); + intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val); intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); intel_dp->psr.psr2_sel_fetch_cff_enabled = true; } else { @@ -2373,6 +2460,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, static void _psr_flush_handle(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) { if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { @@ -2389,7 +2477,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) * SU configuration in case update is sent for any reason after * sff bit gets cleared by the HW on next vblank. 
*/ - intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), + intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val); intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); intel_dp->psr.psr2_sel_fetch_cff_enabled = false; @@ -2702,6 +2790,7 @@ static void psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; const char *status = "unknown"; u32 val, status_val; @@ -2719,8 +2808,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) "BUF_ON", "TG_ON" }; - val = intel_de_read(dev_priv, - EDP_PSR2_STATUS(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder)); status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; @@ -2735,10 +2823,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) "SRDOFFACK", "SRDENT_ON", }; - val = intel_de_read(dev_priv, - EDP_PSR_STATUS(intel_dp->psr.transcoder)); - status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> - EDP_PSR_STATUS_STATE_SHIFT; + val = intel_de_read(dev_priv, EDP_PSR_STATUS(cpu_transcoder)); + status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; } @@ -2749,6 +2835,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; struct intel_psr *psr = &intel_dp->psr; intel_wakeref_t wakeref; const char *status; @@ -2780,12 +2867,10 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) } if (psr->psr2_enabled) { - val = intel_de_read(dev_priv, - EDP_PSR2_CTL(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { - val = intel_de_read(dev_priv, - EDP_PSR_CTL(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)); enabled = val & EDP_PSR_ENABLE; } seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", @@ -2797,12 +2882,9 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) /* * SKL+ Perf counter is reset to 0 everytime DC state is entered */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - val = intel_de_read(dev_priv, - EDP_PSR_PERF_CNT(intel_dp->psr.transcoder)); - val &= EDP_PSR_PERF_CNT_MASK; - seq_printf(m, "Performance counter: %u\n", val); - } + val = intel_de_read(dev_priv, EDP_PSR_PERF_CNT(cpu_transcoder)); + seq_printf(m, "Performance counter: %u\n", + REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val)); if (psr->debug & I915_PSR_DEBUG_IRQ) { seq_printf(m, "Last attempted entry at: %lld\n", @@ -2819,8 +2901,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) * frame boundary between register reads */ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { - val = intel_de_read(dev_priv, - PSR2_SU_STATUS(intel_dp->psr.transcoder, frame)); + val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame)); su_frames_val[frame / 3] = val; } diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index 958d8cabc44b..0f7db617425a 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ 
b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -22,30 +22,36 @@ #define _SRD_CTL_A 0x60800 #define _SRD_CTL_EDP 0x6f800 #define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A) -#define EDP_PSR_ENABLE (1 << 31) -#define BDW_PSR_SINGLE_FRAME (1 << 30) -#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ -#define EDP_PSR_LINK_STANDBY (1 << 27) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25) -#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 -#define EDP_PSR_SKIP_AUX_EXIT (1 << 12) -#define EDP_PSR_TP1_TP2_SEL (0 << 11) -#define EDP_PSR_TP1_TP3_SEL (1 << 11) -#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */ -#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8) -#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) -#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) -#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8) -#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */ -#define EDP_PSR_TP1_TIME_500us (0 << 4) -#define EDP_PSR_TP1_TIME_100us (1 << 4) -#define EDP_PSR_TP1_TIME_2500us (2 << 4) -#define EDP_PSR_TP1_TIME_0us (3 << 4) -#define EDP_PSR_IDLE_FRAME_SHIFT 0 +#define EDP_PSR_ENABLE REG_BIT(31) +#define BDW_PSR_SINGLE_FRAME REG_BIT(30) +#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK REG_BIT(29) /* SW can't modify */ +#define EDP_PSR_LINK_STANDBY REG_BIT(27) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK REG_GENMASK(26, 25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 0) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 1) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 2) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3) +#define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20) +#define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x)) +#define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12) +#define EDP_PSR_TP_MASK REG_BIT(11) +#define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0) +#define EDP_PSR_TP_TP1_TP3 REG_FIELD_PREP(EDP_PSR_TP_MASK, 1) +#define EDP_PSR_CRC_ENABLE REG_BIT(10) /* BDW+ */ +#define EDP_PSR_TP2_TP3_TIME_MASK REG_GENMASK(9, 8) +#define EDP_PSR_TP2_TP3_TIME_500us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 0) +#define EDP_PSR_TP2_TP3_TIME_100us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 1) +#define EDP_PSR_TP2_TP3_TIME_2500us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 2) +#define EDP_PSR_TP2_TP3_TIME_0us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 3) +#define EDP_PSR_TP4_TIME_MASK REG_GENMASK(7, 6) +#define EDP_PSR_TP4_TIME_0us REG_FIELD_PREP(EDP_PSR_TP4_TIME_MASK, 3) /* ICL+ */ +#define EDP_PSR_TP1_TIME_MASK REG_GENMASK(5, 4) +#define EDP_PSR_TP1_TIME_500us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 0) +#define EDP_PSR_TP1_TIME_100us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 1) +#define EDP_PSR_TP1_TIME_2500us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 2) +#define EDP_PSR_TP1_TIME_0us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 3) +#define EDP_PSR_IDLE_FRAMES_MASK REG_GENMASK(3, 0) +#define EDP_PSR_IDLE_FRAMES(x) REG_FIELD_PREP(EDP_PSR_IDLE_FRAMES_MASK, (x)) /* * Until TGL, IMR/IIR are fixed at 0x648xx. 
On TGL+ those registers are relative @@ -80,81 +86,90 @@ #define _SRD_STATUS_A 0x60840 #define _SRD_STATUS_EDP 0x6f840 #define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A) -#define EDP_PSR_STATUS_STATE_MASK (7 << 29) -#define EDP_PSR_STATUS_STATE_SHIFT 29 -#define EDP_PSR_STATUS_STATE_IDLE (0 << 29) -#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29) -#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29) -#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29) -#define EDP_PSR_STATUS_STATE_BUFON (4 << 29) -#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29) -#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29) -#define EDP_PSR_STATUS_LINK_MASK (3 << 26) -#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26) -#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26) -#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26) -#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 -#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f -#define EDP_PSR_STATUS_COUNT_SHIFT 16 -#define EDP_PSR_STATUS_COUNT_MASK 0xf -#define EDP_PSR_STATUS_AUX_ERROR (1 << 15) -#define EDP_PSR_STATUS_AUX_SENDING (1 << 12) -#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9) -#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8) -#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4) -#define EDP_PSR_STATUS_IDLE_MASK 0xf +#define EDP_PSR_STATUS_STATE_MASK REG_GENMASK(31, 29) +#define EDP_PSR_STATUS_STATE_IDLE REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 0) +#define EDP_PSR_STATUS_STATE_SRDONACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 1) +#define EDP_PSR_STATUS_STATE_SRDENT REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 2) +#define EDP_PSR_STATUS_STATE_BUFOFF REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 3) +#define EDP_PSR_STATUS_STATE_BUFON REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 4) +#define EDP_PSR_STATUS_STATE_AUXACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 5) +#define EDP_PSR_STATUS_STATE_SRDOFFACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 6) +#define EDP_PSR_STATUS_LINK_MASK REG_GENMASK(27, 26) +#define EDP_PSR_STATUS_LINK_FULL_OFF REG_FIELD_PREP(EDP_PSR_STATUS_LINK_MASK, 0) +#define EDP_PSR_STATUS_LINK_FULL_ON REG_FIELD_PREP(EDP_PSR_STATUS_LINK_MASK, 1) +#define EDP_PSR_STATUS_LINK_STANDBY REG_FIELD_PREP(EDP_PSR_STATUS_LINK_MASK, 2) +#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK REG_GENMASK(24, 20) +#define EDP_PSR_STATUS_COUNT_MASK REG_GENMASK(19, 16) +#define EDP_PSR_STATUS_AUX_ERROR REG_BIT(15) +#define EDP_PSR_STATUS_AUX_SENDING REG_BIT(12) +#define EDP_PSR_STATUS_SENDING_IDLE REG_BIT(9) +#define EDP_PSR_STATUS_SENDING_TP2_TP3 REG_BIT(8) +#define EDP_PSR_STATUS_SENDING_TP1 REG_BIT(4) +#define EDP_PSR_STATUS_IDLE_MASK REG_GENMASK(3, 0) #define _SRD_PERF_CNT_A 0x60844 #define _SRD_PERF_CNT_EDP 0x6f844 #define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A) -#define EDP_PSR_PERF_CNT_MASK 0xffffff +#define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0) /* PSR_MASK on SKL+ */ #define _SRD_DEBUG_A 0x60860 #define _SRD_DEBUG_EDP 0x6f860 #define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A) -#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) -#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) -#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) -#define EDP_PSR_DEBUG_MASK_HPD (1 << 25) -#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ -#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ +#define EDP_PSR_DEBUG_MASK_MAX_SLEEP REG_BIT(28) +#define EDP_PSR_DEBUG_MASK_LPSP REG_BIT(27) +#define EDP_PSR_DEBUG_MASK_MEMUP REG_BIT(26) +#define EDP_PSR_DEBUG_MASK_HPD REG_BIT(25) +#define EDP_PSR_DEBUG_MASK_FBC_MODIFY REG_BIT(24) +#define EDP_PSR_DEBUG_MASK_PRIMARY_FLIP REG_BIT(23) /* hsw */ 
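The register definitions in this file are being converted from open-coded shifts to REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(), so a value carries its field mask and REG_FIELD_GET() can extract it again, as the intel_psr.c and intel_overlay.c hunks now do. A standalone sketch of the same idea using simplified stand-in macros (the EXAMPLE_* names are local to this example; the i915 versions add extra compile-time checking):

	#include <assert.h>
	#include <stdint.h>

	/* simplified stand-ins for REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET() */
	#define EXAMPLE_GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
	#define EXAMPLE_FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
	#define EXAMPLE_FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

	#define MAX_SLEEP_TIME_MASK	EXAMPLE_GENMASK(24, 20)	/* like EDP_PSR_MAX_SLEEP_TIME_MASK */

	int main(void)
	{
		uint32_t val = EXAMPLE_FIELD_PREP(MAX_SLEEP_TIME_MASK, 0x1f);

		/* same encoding as the old open-coded "max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT" */
		assert(val == (0x1fu << 20));
		assert(EXAMPLE_FIELD_GET(MAX_SLEEP_TIME_MASK, val) == 0x1f);
		return 0;
	}

The same pattern is what lets the TGL PSR2 wake-line macros below encode (lines - TGL_EDP_PSR2_*_WAKE_MIN_LINES) directly into the field instead of carrying a separate shift define.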
+#define EDP_PSR_DEBUG_MASK_HDCP_ENABLE REG_BIT(22) /* hsw/bdw */ +#define EDP_PSR_DEBUG_MASK_SPRITE_ENABLE REG_BIT(21) /* hsw */ +#define EDP_PSR_DEBUG_MASK_CURSOR_MOVE REG_BIT(20) /* hsw */ +#define EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT REG_BIT(19) /* hsw */ +#define EDP_PSR_DEBUG_MASK_DPST_PHASE_IN REG_BIT(18) /* hsw */ +#define EDP_PSR_DEBUG_MASK_KVMR_SESSION_EN REG_BIT(17) +#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE REG_BIT(16) /* hsw-skl */ +#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN REG_BIT(15) /* skl+ */ +#define EDP_PSR_DEBUG_RFB_UPDATE_SENT REG_BIT(2) /* bdw */ +#define EDP_PSR_DEBUG_ENTRY_COMPLETION REG_BIT(1) /* hsw/bdw */ #define _PSR2_CTL_A 0x60900 #define _PSR2_CTL_EDP 0x6f900 #define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A) -#define EDP_PSR2_ENABLE (1 << 31) -#define EDP_SU_TRACK_ENABLE (1 << 30) /* up to adl-p */ -#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28) -#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28) +#define EDP_PSR2_ENABLE REG_BIT(31) +#define EDP_SU_TRACK_ENABLE REG_BIT(30) /* up to adl-p */ +#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28) +#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 0) +#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 1) #define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */ #define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */ -#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20) -#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20) +#define EDP_MAX_SU_DISABLE_TIME_MASK REG_GENMASK(24, 20) +#define EDP_MAX_SU_DISABLE_TIME(t) REG_FIELD_PREP(EDP_MAX_SU_DISABLE_TIME, (t)) +#define EDP_PSR2_IO_BUFFER_WAKE_MASK REG_GENMASK(14, 13) #define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8 -#define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13) -#define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13) +#define EDP_PSR2_IO_BUFFER_WAKE(lines) REG_FIELD_PREP(EDP_PSR2_IO_BUFFER_WAKE_MASK, \ + EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) +#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK REG_GENMASK(15, 13) #define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5 -#define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13 -#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT) -#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13) +#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) REG_FIELD_PREP(TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK, \ + (lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) +#define EDP_PSR2_FAST_WAKE_MASK REG_GENMASK(12, 11) #define EDP_PSR2_FAST_WAKE_MAX_LINES 8 -#define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11) -#define EDP_PSR2_FAST_WAKE_MASK (3 << 11) +#define EDP_PSR2_FAST_WAKE(lines) REG_FIELD_PREP(EDP_PSR2_FAST_WAKE_MASK, \ + EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) +#define TGL_EDP_PSR2_FAST_WAKE_MASK REG_GENMASK(12, 10) #define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5 -#define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10 -#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT) -#define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10) -#define EDP_PSR2_TP2_TIME_500us (0 << 8) -#define EDP_PSR2_TP2_TIME_100us (1 << 8) -#define EDP_PSR2_TP2_TIME_2500us (2 << 8) -#define EDP_PSR2_TP2_TIME_50us (3 << 8) -#define EDP_PSR2_TP2_TIME_MASK (3 << 8) -#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4 -#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4) -#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4) -#define EDP_PSR2_IDLE_FRAME_MASK 0xf 
-#define EDP_PSR2_IDLE_FRAME_SHIFT 0 +#define TGL_EDP_PSR2_FAST_WAKE(lines) REG_FIELD_PREP(TGL_EDP_PSR2_FAST_WAKE_MASK, \ + (lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) +#define EDP_PSR2_TP2_TIME_MASK REG_GENMASK(9, 8) +#define EDP_PSR2_TP2_TIME_500us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 0) +#define EDP_PSR2_TP2_TIME_100us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 1) +#define EDP_PSR2_TP2_TIME_2500us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 2) +#define EDP_PSR2_TP2_TIME_50us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 3) +#define EDP_PSR2_FRAME_BEFORE_SU_MASK REG_GENMASK(7, 4) +#define EDP_PSR2_FRAME_BEFORE_SU(a) REG_FIELD_PREP(EDP_PSR2_FRAME_BEFORE_SU_MASK, (a)) +#define EDP_PSR2_IDLE_FRAMES_MASK REG_GENMASK(3, 0) +#define EDP_PSR2_IDLE_FRAMES(x) REG_FIELD_PREP(EDP_PSR2_IDLE_FRAMES_MASK, (x)) #define _PSR_EVENT_TRANS_A 0x60848 #define _PSR_EVENT_TRANS_B 0x61848 @@ -162,22 +177,22 @@ #define _PSR_EVENT_TRANS_D 0x63848 #define _PSR_EVENT_TRANS_EDP 0x6f848 #define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A) -#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17) -#define PSR_EVENT_PSR2_DISABLED (1 << 16) -#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15) -#define PSR_EVENT_SU_CRC_FIFO_UNDERRUN (1 << 14) -#define PSR_EVENT_GRAPHICS_RESET (1 << 12) -#define PSR_EVENT_PCH_INTERRUPT (1 << 11) -#define PSR_EVENT_MEMORY_UP (1 << 10) -#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9) -#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8) -#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6) -#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */ -#define PSR_EVENT_HDCP_ENABLE (1 << 4) -#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3) -#define PSR_EVENT_VBI_ENABLE (1 << 2) -#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) -#define PSR_EVENT_PSR_DISABLE (1 << 0) +#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE REG_BIT(17) +#define PSR_EVENT_PSR2_DISABLED REG_BIT(16) +#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN REG_BIT(15) +#define PSR_EVENT_SU_CRC_FIFO_UNDERRUN REG_BIT(14) +#define PSR_EVENT_GRAPHICS_RESET REG_BIT(12) +#define PSR_EVENT_PCH_INTERRUPT REG_BIT(11) +#define PSR_EVENT_MEMORY_UP REG_BIT(10) +#define PSR_EVENT_FRONT_BUFFER_MODIFY REG_BIT(9) +#define PSR_EVENT_WD_TIMER_EXPIRE REG_BIT(8) +#define PSR_EVENT_PIPE_REGISTERS_UPDATE REG_BIT(6) +#define PSR_EVENT_REGISTER_UPDATE REG_BIT(5) /* Reserved in ICL+ */ +#define PSR_EVENT_HDCP_ENABLE REG_BIT(4) +#define PSR_EVENT_KVMR_SESSION_ENABLE REG_BIT(3) +#define PSR_EVENT_VBI_ENABLE REG_BIT(2) +#define PSR_EVENT_LPSP_MODE_EXIT REG_BIT(1) +#define PSR_EVENT_PSR_DISABLE REG_BIT(0) #define _PSR2_STATUS_A 0x60940 #define _PSR2_STATUS_EDP 0x6f940 diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index e12ba458636c..21f92123c844 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -117,9 +117,6 @@ struct intel_sdvo { enum port port; - bool has_hdmi_monitor; - bool has_hdmi_audio; - /* DDC bus used by this SDVO encoder */ u8 ddc_bus; @@ -1303,10 +1300,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config) pipe_config->clock_set = true; } -static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo, +static bool intel_has_hdmi_sink(struct intel_sdvo_connector *intel_sdvo_connector, const struct drm_connector_state *conn_state) { - return sdvo->has_hdmi_monitor && + struct drm_connector *connector = conn_state->connector; + + return intel_sdvo_connector->is_hdmi && + connector->display_info.is_hdmi && 
READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } @@ -1326,7 +1326,9 @@ static bool intel_sdvo_has_audio(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_sdvo *intel_sdvo = to_sdvo(encoder); + struct drm_connector *connector = conn_state->connector; + struct intel_sdvo_connector *intel_sdvo_connector = + to_intel_sdvo_connector(connector); const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); @@ -1334,7 +1336,8 @@ static bool intel_sdvo_has_audio(struct intel_encoder *encoder, return false; if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - return intel_sdvo->has_hdmi_audio; + return intel_sdvo_connector->is_hdmi && + connector->display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } @@ -1351,6 +1354,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); pipe_config->pipe_bpp = 8*3; + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) @@ -1400,7 +1404,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, pipe_config->pixel_multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); - pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state); + pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, conn_state); pipe_config->has_audio = intel_sdvo_has_audio(encoder, pipe_config, conn_state) && @@ -1906,7 +1910,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector, struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; - bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state); + bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state); int clock = mode->clock; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -2032,36 +2036,35 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) return hweight16(intel_sdvo->caps.output_flags) > 1; } -static struct edid * +static const struct drm_edid * intel_sdvo_get_edid(struct drm_connector *connector) { struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); - return drm_get_edid(connector, &sdvo->ddc); + return drm_edid_read_ddc(connector, &sdvo->ddc); } /* Mac mini hack -- use the same DDC as the analog connector */ -static struct edid * +static const struct drm_edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct drm_i915_private *i915 = to_i915(connector->dev); + struct i2c_adapter *i2c; - return drm_get_edid(connector, - intel_gmbus_get_adapter(dev_priv, - dev_priv->display.vbt.crt_ddc_pin)); + i2c = intel_gmbus_get_adapter(i915, i915->display.vbt.crt_ddc_pin); + + return drm_edid_read_ddc(connector, i2c); } static enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); - struct intel_sdvo_connector *intel_sdvo_connector = - to_intel_sdvo_connector(connector); enum drm_connector_status status; - struct edid *edid; + const struct drm_edid *drm_edid; - edid = intel_sdvo_get_edid(connector); + drm_edid = intel_sdvo_get_edid(connector); - if (edid == NULL && 
intel_sdvo_multifunc_encoder(intel_sdvo)) { + if (!drm_edid && intel_sdvo_multifunc_encoder(intel_sdvo)) { u8 ddc, saved_ddc = intel_sdvo->ddc_bus; /* @@ -2070,15 +2073,15 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) */ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { intel_sdvo->ddc_bus = ddc; - edid = intel_sdvo_get_edid(connector); - if (edid) + drm_edid = intel_sdvo_get_edid(connector); + if (drm_edid) break; } /* * If we found the EDID on the other bus, * assume that is the correct DDC bus. */ - if (edid == NULL) + if (!drm_edid) intel_sdvo->ddc_bus = saved_ddc; } @@ -2086,21 +2089,19 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) * When there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector. */ - if (edid == NULL) - edid = intel_sdvo_get_analog_edid(connector); + if (!drm_edid) + drm_edid = intel_sdvo_get_analog_edid(connector); status = connector_status_unknown; - if (edid != NULL) { + if (drm_edid) { + const struct edid *edid = drm_edid_raw(drm_edid); + /* DDC bus is shared, match EDID to connector type */ - if (edid->input & DRM_EDID_INPUT_DIGITAL) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) status = connector_status_connected; - if (intel_sdvo_connector->is_hdmi) { - intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); - intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); - } - } else + else status = connector_status_disconnected; - kfree(edid); + drm_edid_free(drm_edid); } return status; @@ -2108,8 +2109,9 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) static bool intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, - struct edid *edid) + const struct drm_edid *drm_edid) { + const struct edid *edid = drm_edid_raw(drm_edid); bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); bool connector_is_digital = !!IS_DIGITAL(sdvo); @@ -2147,30 +2149,28 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) intel_sdvo->attached_output = response; - intel_sdvo->has_hdmi_monitor = false; - intel_sdvo->has_hdmi_audio = false; - if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; else if (IS_TMDS(intel_sdvo_connector)) ret = intel_sdvo_tmds_sink_detect(connector); else { - struct edid *edid; + const struct drm_edid *drm_edid; /* if we have an edid check it matches the connection */ - edid = intel_sdvo_get_edid(connector); - if (edid == NULL) - edid = intel_sdvo_get_analog_edid(connector); - if (edid != NULL) { + drm_edid = intel_sdvo_get_edid(connector); + if (!drm_edid) + drm_edid = intel_sdvo_get_analog_edid(connector); + if (drm_edid) { if (intel_sdvo_connector_matches_edid(intel_sdvo_connector, - edid)) + drm_edid)) ret = connector_status_connected; else ret = connector_status_disconnected; - kfree(edid); - } else + drm_edid_free(drm_edid); + } else { ret = connector_status_connected; + } } return ret; @@ -2179,13 +2179,13 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) static int intel_sdvo_get_ddc_modes(struct drm_connector *connector) { int num_modes = 0; - struct edid *edid; + const struct drm_edid *drm_edid; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); /* set the bus switch and get the modes */ - edid = intel_sdvo_get_edid(connector); + drm_edid = intel_sdvo_get_edid(connector); /* * Mac mini hack. 
On this device, the DVI-I connector shares one DDC @@ -2193,17 +2193,17 @@ static int intel_sdvo_get_ddc_modes(struct drm_connector *connector) * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. */ - if (!edid) - edid = intel_sdvo_get_analog_edid(connector); + if (!drm_edid) + drm_edid = intel_sdvo_get_analog_edid(connector); - if (!edid) + if (!drm_edid) return 0; if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), - edid)) - num_modes += intel_connector_update_modes(connector, edid); + drm_edid)) + num_modes += intel_connector_update_modes(connector, drm_edid); - kfree(edid); + drm_edid_free(drm_edid); return num_modes; } diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 1cfb94b5cedb..88ef56b6e0fd 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -2007,11 +2007,16 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state, if (!new_crtc_state->hw.active) return; + /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */ + if (!intel_crtc_needs_modeset(new_crtc_state) && + !intel_crtc_needs_fastset(new_crtc_state)) + return; + encoder = intel_get_crtc_new_encoder(state, new_crtc_state); intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state); #define MPLLB_CHECK(__name) \ - I915_STATE_WARN(mpllb_sw_state->__name != mpllb_hw_state.__name, \ + I915_STATE_WARN(i915, mpllb_sw_state->__name != mpllb_hw_state.__name, \ "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \ crtc->base.base.id, crtc->base.name, \ __stringify(__name), \ diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 4635c7ad23f9..91c6dca342b2 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -16,16 +16,6 @@ struct intel_crtc_state; struct intel_plane_state; enum pipe; -/* - * FIXME: We should instead only take spinlocks once for the entire update - * instead of once per mmio. 
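The intel_sdvo.c changes above move the EDID handling onto the opaque drm_edid API: drm_edid_read_ddc() fetches the EDID, drm_edid_raw() exposes the legacy struct edid only where it is still needed, and drm_edid_free() releases it. A sketch of that usage pattern in a get_modes-style path (example_get_modes is a hypothetical function written for illustration, not part of the patch):

	static int example_get_modes(struct drm_connector *connector,
				     struct i2c_adapter *ddc)
	{
		const struct drm_edid *drm_edid;
		int num_modes = 0;

		drm_edid = drm_edid_read_ddc(connector, ddc);	/* NULL if nothing answered */
		if (!drm_edid)
			return 0;

		/* drm_edid_raw() is the escape hatch to the legacy struct edid */
		if (drm_edid_raw(drm_edid)->input & DRM_EDID_INPUT_DIGITAL)
			num_modes = intel_connector_update_modes(connector, drm_edid);

		drm_edid_free(drm_edid);			/* always pair with the read */

		return num_modes;
	}
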
- */ -#if IS_ENABLED(CONFIG_PROVE_LOCKING) -#define VBLANK_EVASION_TIME_US 250 -#else -#define VBLANK_EVASION_TIME_US 100 -#endif - struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c index 70a391083751..a76b48ebc2d3 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c +++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c @@ -86,6 +86,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, goto out; } state->acquire_ctx = &ctx; + to_intel_atomic_state(state)->internal = true; while (1) { plane_state = drm_atomic_get_plane_state(state, plane); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 3b60995e9dfb..3ebf41859043 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -5,16 +5,25 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "intel_atomic.h" +#include "intel_cx0_phy_regs.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display.h" +#include "intel_display_driver.h" #include "intel_display_power_map.h" #include "intel_display_types.h" #include "intel_dkl_phy_regs.h" +#include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_mg_phy_regs.h" +#include "intel_modeset_lock.h" #include "intel_tc.h" +#define DP_PIN_ASSIGNMENT_C 0x3 +#define DP_PIN_ASSIGNMENT_D 0x4 +#define DP_PIN_ASSIGNMENT_E 0x5 + enum tc_port_mode { TC_PORT_DISCONNECTED, TC_PORT_TBT_ALT, @@ -46,6 +55,7 @@ struct intel_tc_port { enum intel_display_power_domain lock_power_domain; #endif struct delayed_work disconnect_phy_work; + struct delayed_work link_reset_work; int link_refcount; bool legacy_port:1; char port_name[8]; @@ -59,6 +69,7 @@ static enum intel_display_power_domain tc_phy_cold_off_domain(struct intel_tc_port *); static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc); static bool tc_phy_is_ready(struct intel_tc_port *tc); +static bool tc_phy_wait_for_ready(struct intel_tc_port *tc); static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc); static const char *tc_port_mode_name(enum tc_port_mode mode) @@ -141,15 +152,23 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port) * * POWER_DOMAIN_TC_COLD_OFF: * ------------------------- - * TGL/legacy, DP-alt modes: + * ICL/DP-alt, TBT mode: + * - TCSS/TBT: block TC-cold power state for using the (direct or + * TBT DP-IN) AUX and main lanes. + * + * TGL/all modes: * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state - * - TCSS/PHY: block TC-cold power state for using the PHY AUX and - * main lanes. + * - TCSS/PHY: block TC-cold power state for using the (direct or + * TBT DP-IN) AUX and main lanes. * - * ICL, TGL, ADLP/TBT mode: - * - TCSS/IOM,FIA access for HPD live state + * ADLP/TBT mode: * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN) * AUX and main lanes. + * + * XELPDP+/all modes: + * - TCSS/IOM,FIA access for PHY ready, owned state + * - TCSS/PHY: block TC-cold power state for using the (direct or + * TBT DP-IN) AUX and main lanes. 
*/ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) { @@ -271,6 +290,27 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx); } +static int mtl_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + intel_wakeref_t wakeref; + u32 pin_mask; + + with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) + pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port); + + switch (pin_mask) { + default: + MISSING_CASE(pin_mask); + fallthrough; + case DP_PIN_ASSIGNMENT_D: + return 2; + case DP_PIN_ASSIGNMENT_C: + case DP_PIN_ASSIGNMENT_E: + return 4; + } +} + int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -284,6 +324,9 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) assert_tc_cold_blocked(tc); + if (DISPLAY_VER(i915) >= 14) + return mtl_tc_port_get_pin_assignment_mask(dig_port); + lane_mask = 0; with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) lane_mask = intel_tc_port_get_lane_mask(dig_port); @@ -873,6 +916,200 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = { }; /* + * XELPDP TC PHY handlers + * ---------------------- + */ +static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + enum hpd_pin hpd_pin = dig_port->base.hpd_pin; + u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin]; + u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin]; + intel_wakeref_t wakeref; + u32 pica_isr; + u32 pch_isr; + u32 mask = 0; + + with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) { + pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR); + pch_isr = intel_de_read(i915, SDEISR); + } + + if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK)) + mask |= BIT(TC_PORT_DP_ALT); + if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK)) + mask |= BIT(TC_PORT_TBT_ALT); + + if (tc->legacy_port && (pch_isr & pch_isr_bit)) + mask |= BIT(TC_PORT_LEGACY); + + return mask; +} + +static bool +xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum port port = tc->dig_port->base.port; + + assert_tc_cold_blocked(tc); + + return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE; +} + +static bool +xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + + if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) { + drm_dbg_kms(&i915->drm, + "Port %s: timeout waiting for TCSS power to get %s\n", + enabled ? 
"enabled" : "disabled", + tc->port_name); + return false; + } + + return true; +} + +static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum port port = tc->dig_port->base.port; + u32 val; + + assert_tc_cold_blocked(tc); + + val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); + if (enable) + val |= XELPDP_TCSS_POWER_REQUEST; + else + val &= ~XELPDP_TCSS_POWER_REQUEST; + intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); +} + +static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + + __xelpdp_tc_phy_enable_tcss_power(tc, enable); + + if ((!tc_phy_wait_for_ready(tc) || + !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) && + !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { + if (enable) { + __xelpdp_tc_phy_enable_tcss_power(tc, false); + xelpdp_tc_phy_wait_for_tcss_power(tc, false); + } + + return false; + } + + return true; +} + +static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum port port = tc->dig_port->base.port; + u32 val; + + assert_tc_cold_blocked(tc); + + val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); + if (take) + val |= XELPDP_TC_PHY_OWNERSHIP; + else + val &= ~XELPDP_TC_PHY_OWNERSHIP; + intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); +} + +static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum port port = tc->dig_port->base.port; + + assert_tc_cold_blocked(tc); + + return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP; +} + +static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + intel_wakeref_t tc_cold_wref; + enum intel_display_power_domain domain; + + tc_cold_wref = __tc_cold_block(tc, &domain); + + tc->mode = tc_phy_get_current_mode(tc); + if (tc->mode != TC_PORT_DISCONNECTED) + tc->lock_wakeref = tc_cold_block(tc); + + drm_WARN_ON(&i915->drm, + (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && + !xelpdp_tc_phy_tcss_power_is_enabled(tc)); + + __tc_cold_unblock(tc, domain, tc_cold_wref); +} + +static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) +{ + tc->lock_wakeref = tc_cold_block(tc); + + if (tc->mode == TC_PORT_TBT_ALT) + return true; + + if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) + goto out_unblock_tccold; + + xelpdp_tc_phy_take_ownership(tc, true); + + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) + goto out_release_phy; + + return true; + +out_release_phy: + xelpdp_tc_phy_take_ownership(tc, false); + xelpdp_tc_phy_wait_for_tcss_power(tc, false); + +out_unblock_tccold: + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); + + return false; +} + +static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc) +{ + switch (tc->mode) { + case TC_PORT_LEGACY: + case TC_PORT_DP_ALT: + xelpdp_tc_phy_take_ownership(tc, false); + xelpdp_tc_phy_enable_tcss_power(tc, false); + fallthrough; + case TC_PORT_TBT_ALT: + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); + break; + default: + MISSING_CASE(tc->mode); + } +} + +static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = { + .cold_off_domain = tgl_tc_phy_cold_off_domain, + .hpd_live_status = xelpdp_tc_phy_hpd_live_status, + .is_ready = adlp_tc_phy_is_ready, + .is_owned = xelpdp_tc_phy_is_owned, + .get_hw_state = 
xelpdp_tc_phy_get_hw_state, + .connect = xelpdp_tc_phy_connect, + .disconnect = xelpdp_tc_phy_disconnect, + .init = adlp_tc_phy_init, +}; + +/* * Generic TC PHY handlers * ----------------------- */ @@ -945,13 +1182,18 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, return is_connected; } -static void tc_phy_wait_for_ready(struct intel_tc_port *tc) +static bool tc_phy_wait_for_ready(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); - if (wait_for(tc_phy_is_ready(tc), 100)) + if (wait_for(tc_phy_is_ready(tc), 500)) { drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n", tc->port_name); + + return false; + } + + return true; } static enum tc_port_mode @@ -1335,6 +1577,138 @@ bool intel_tc_port_connected(struct intel_encoder *encoder) return is_connected; } +static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc) +{ + bool ret; + + mutex_lock(&tc->lock); + + ret = tc->link_refcount && + tc->mode == TC_PORT_DP_ALT && + intel_tc_port_needs_reset(tc); + + mutex_unlock(&tc->lock); + + return ret; +} + +bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + if (!intel_phy_is_tc(i915, phy)) + return false; + + return __intel_tc_port_link_needs_reset(to_tc_port(dig_port)); +} + +static int reset_link_commit(struct intel_tc_port *tc, + struct intel_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base); + struct intel_crtc *crtc; + u8 pipe_mask; + int ret; + + ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx); + if (ret) + return ret; + + ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); + if (ret) + return ret; + + if (!pipe_mask) + return 0; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->uapi.connectors_changed = true; + } + + if (!__intel_tc_port_link_needs_reset(tc)) + return 0; + + return drm_atomic_commit(&state->base); +} + +static int reset_link(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *_state; + struct intel_atomic_state *state; + int ret; + + _state = drm_atomic_state_alloc(&i915->drm); + if (!_state) + return -ENOMEM; + + state = to_intel_atomic_state(_state); + state->internal = true; + + intel_modeset_lock_ctx_retry(&ctx, state, 0, ret) + ret = reset_link_commit(tc, state, &ctx); + + drm_atomic_state_put(&state->base); + + return ret; +} + +static void intel_tc_port_link_reset_work(struct work_struct *work) +{ + struct intel_tc_port *tc = + container_of(work, struct intel_tc_port, link_reset_work.work); + struct drm_i915_private *i915 = tc_to_i915(tc); + int ret; + + if (!__intel_tc_port_link_needs_reset(tc)) + return; + + mutex_lock(&i915->drm.mode_config.mutex); + + drm_dbg_kms(&i915->drm, + "Port %s: TypeC DP-alt sink disconnected, resetting link\n", + tc->port_name); + ret = reset_link(tc); + drm_WARN_ON(&i915->drm, ret); + + mutex_unlock(&i915->drm.mode_config.mutex); +} + +bool intel_tc_port_link_reset(struct intel_digital_port *dig_port) +{ + if 
(!intel_tc_port_link_needs_reset(dig_port)) + return false; + + queue_delayed_work(system_unbound_wq, + &to_tc_port(dig_port)->link_reset_work, + msecs_to_jiffies(2000)); + + return true; +} + +void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + struct intel_tc_port *tc = to_tc_port(dig_port); + + if (!intel_phy_is_tc(i915, phy)) + return; + + cancel_delayed_work(&tc->link_reset_work); +} + static void __intel_tc_port_lock(struct intel_tc_port *tc, int required_lanes) { @@ -1382,11 +1756,19 @@ static void intel_tc_port_disconnect_phy_work(struct work_struct *work) * * Flush the delayed work disconnecting an idle PHY. */ -void intel_tc_port_flush_work(struct intel_digital_port *dig_port) +static void intel_tc_port_flush_work(struct intel_digital_port *dig_port) { flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work); } +void intel_tc_port_suspend(struct intel_digital_port *dig_port) +{ + struct intel_tc_port *tc = to_tc_port(dig_port); + + cancel_delayed_work_sync(&tc->link_reset_work); + intel_tc_port_flush_work(dig_port); +} + void intel_tc_port_unlock(struct intel_digital_port *dig_port) { struct intel_tc_port *tc = to_tc_port(dig_port); @@ -1423,6 +1805,14 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port) intel_tc_port_lock(dig_port); __intel_tc_port_put_link(tc); intel_tc_port_unlock(dig_port); + + /* + * The firmware will not update the HPD status of other TypeC ports + * that are active in DP-alt mode with their sink disconnected, until + * this port is disabled and its PHY gets disconnected. Make sure this + * happens in a timely manner by disconnecting the PHY synchronously. 
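+	 *
+	 * intel_tc_port_flush_work() flushes the delayed disconnect_phy_work,
+	 * so the PHY disconnect is guaranteed to have completed by the time
+	 * this function returns.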
+ */ + intel_tc_port_flush_work(dig_port); } int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) @@ -1442,7 +1832,9 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) dig_port->tc = tc; tc->dig_port = dig_port; - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(i915) >= 14) + tc->phy_ops = &xelpdp_tc_phy_ops; + else if (DISPLAY_VER(i915) >= 13) tc->phy_ops = &adlp_tc_phy_ops; else if (DISPLAY_VER(i915) >= 12) tc->phy_ops = &tgl_tc_phy_ops; @@ -1453,7 +1845,9 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) "%c/TC#%d", port_name(port), tc_port + 1); mutex_init(&tc->lock); + /* TODO: Combine the two works */ INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work); + INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work); tc->legacy_port = is_legacy; tc->mode = TC_PORT_DISCONNECTED; tc->link_refcount = 0; @@ -1467,7 +1861,7 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) void intel_tc_port_cleanup(struct intel_digital_port *dig_port) { - intel_tc_port_flush_work(dig_port); + intel_tc_port_suspend(dig_port); kfree(dig_port->tc); dig_port->tc = NULL; diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index dd0810f9ea95..3b16491925fa 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -30,11 +30,14 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state); void intel_tc_port_lock(struct intel_digital_port *dig_port); void intel_tc_port_unlock(struct intel_digital_port *dig_port); -void intel_tc_port_flush_work(struct intel_digital_port *dig_port); +void intel_tc_port_suspend(struct intel_digital_port *dig_port); void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes); void intel_tc_port_put_link(struct intel_digital_port *dig_port); bool intel_tc_port_ref_held(struct intel_digital_port *dig_port); +bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port); +bool intel_tc_port_link_reset(struct intel_digital_port *dig_port); +void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port); int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); void intel_tc_port_cleanup(struct intel_digital_port *dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 557ec5b62afa..36b479b46b60 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -35,14 +35,15 @@ #include <drm/drm_edid.h> #include "i915_drv.h" -#include "i915_irq.h" #include "i915_reg.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_dpll.h" #include "intel_hotplug.h" +#include "intel_load_detect.h" #include "intel_tv.h" #include "intel_tv_regs.h" @@ -1205,6 +1206,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n"); @@ -1722,21 +1724,21 @@ intel_tv_detect(struct drm_connector *connector, return connector_status_disconnected; if (force) { - struct intel_load_detect_pipe tmp; - int ret; + struct drm_atomic_state *state; - ret = 
intel_get_load_detect_pipe(connector, &tmp, ctx); - if (ret < 0) - return ret; + state = intel_load_detect_get_pipe(connector, ctx); + if (IS_ERR(state)) + return PTR_ERR(state); - if (ret > 0) { + if (state) { type = intel_tv_detect_type(intel_tv, connector); - intel_release_load_detect_pipe(connector, &tmp, ctx); + intel_load_detect_release_pipe(connector, state, ctx); status = type < 0 ? connector_status_disconnected : connector_status_connected; - } else + } else { status = connector_status_unknown; + } if (status == connector_status_connected) { intel_tv->type = type; diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index f8bf9810527d..f5659ebd08eb 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -340,8 +340,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, * matches how the scanline counter based position works since * the scanline counter doesn't count the two half lines. */ - if (position >= vtotal) - position = vtotal - 1; + position = min(position, vtotal - 1); /* * Start of vblank interrupt is triggered at start of hsync, @@ -488,21 +487,27 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) } } -void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) +void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, + bool vrr_enable) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); + u8 mode_flags = crtc_state->mode_flags; struct drm_display_mode adjusted_mode; int vmax_vblank_start = 0; unsigned long irqflags; drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); - if (crtc_state->vrr.enable) { + if (vrr_enable) { + drm_WARN_ON(&i915->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0); + adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state); vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state); + } else { + mode_flags &= ~I915_MODE_FLAG_VRR; } /* @@ -524,7 +529,7 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) crtc->vmax_vblank_start = vmax_vblank_start; - crtc->mode_flags = crtc_state->mode_flags; + crtc->mode_flags = mode_flags; crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h index 0884db7e76ae..08e706b29149 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.h +++ b/drivers/gpu/drm/i915/display/intel_vblank.h @@ -20,6 +20,7 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, int intel_get_crtc_scanline(struct intel_crtc *crtc); void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc); void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc); -void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state); +void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, + bool vrr_enable); #endif /* __INTEL_VBLANK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 8e787c13d26d..bd9116d2cd76 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -19,327 +19,6 @@ #include "intel_vdsc.h" #include "intel_vdsc_regs.h" -enum 
ROW_INDEX_BPP { - ROW_INDEX_6BPP = 0, - ROW_INDEX_8BPP, - ROW_INDEX_10BPP, - ROW_INDEX_12BPP, - ROW_INDEX_15BPP, - MAX_ROW_INDEX -}; - -enum COLUMN_INDEX_BPC { - COLUMN_INDEX_8BPC = 0, - COLUMN_INDEX_10BPC, - COLUMN_INDEX_12BPC, - COLUMN_INDEX_14BPC, - COLUMN_INDEX_16BPC, - MAX_COLUMN_INDEX -}; - -/* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */ -static const u16 rc_buf_thresh[] = { - 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616, - 7744, 7872, 8000, 8064 -}; - -struct rc_parameters { - u16 initial_xmit_delay; - u8 first_line_bpg_offset; - u16 initial_offset; - u8 flatness_min_qp; - u8 flatness_max_qp; - u8 rc_quant_incr_limit0; - u8 rc_quant_incr_limit1; - struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; -}; - -/* - * Selected Rate Control Related Parameter Recommended Values - * from DSC_v1.11 spec & C Model release: DSC_model_20161212 - */ -static const struct rc_parameters rc_parameters[][MAX_COLUMN_INDEX] = { -{ - /* 6BPP/8BPC */ - { 768, 15, 6144, 3, 13, 11, 11, { - { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 }, - { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 }, - { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 }, - { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 } - } - }, - /* 6BPP/10BPC */ - { 768, 15, 6144, 7, 17, 15, 15, { - { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 }, - { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 }, - { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 }, - { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 }, - { 17, 18, -12 } - } - }, - /* 6BPP/12BPC */ - { 768, 15, 6144, 11, 21, 19, 19, { - { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 }, - { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 }, - { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 }, - { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 }, - { 21, 22, -12 } - } - }, - /* 6BPP/14BPC */ - { 768, 15, 6144, 15, 25, 23, 27, { - { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 }, - { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 }, - { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 }, - { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 }, - { 25, 26, -12 } - } - }, - /* 6BPP/16BPC */ - { 768, 15, 6144, 19, 29, 27, 27, { - { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 }, - { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 }, - { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 }, - { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 }, - { 29, 30, -12 } - } - }, -}, -{ - /* 8BPP/8BPC */ - { 512, 12, 6144, 3, 12, 11, 11, { - { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, - { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, - { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 }, - { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } - } - }, - /* 8BPP/10BPC */ - { 512, 12, 6144, 7, 16, 15, 15, { - { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 }, - { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, - { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, - { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } - } - }, - /* 8BPP/12BPC */ - { 512, 12, 6144, 11, 20, 19, 19, { - { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 }, - { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, - { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, - { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, - { 21, 23, -12 } - } - }, - /* 8BPP/14BPC */ - { 512, 12, 6144, 15, 24, 23, 23, { - { 0, 12, 0 }, { 5, 13, 0 }, { 11, 
15, 0 }, { 12, 17, -2 }, - { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, - { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 }, - { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 }, - { 24, 25, -12 } - } - }, - /* 8BPP/16BPC */ - { 512, 12, 6144, 19, 28, 27, 27, { - { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 }, - { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, - { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 }, - { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 }, - { 28, 29, -12 } - } - }, -}, -{ - /* 10BPP/8BPC */ - { 410, 15, 5632, 3, 12, 11, 11, { - { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 }, - { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, - { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 }, - { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 } - } - }, - /* 10BPP/10BPC */ - { 410, 15, 5632, 7, 16, 15, 15, { - { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 }, - { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, - { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 }, - { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 } - } - }, - /* 10BPP/12BPC */ - { 410, 15, 5632, 11, 20, 19, 19, { - { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 }, - { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, - { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 }, - { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 }, - { 19, 20, -12 } - } - }, - /* 10BPP/14BPC */ - { 410, 15, 5632, 15, 24, 23, 23, { - { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 }, - { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, - { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 }, - { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 }, - { 23, 24, -12 } - } - }, - /* 10BPP/16BPC */ - { 410, 15, 5632, 19, 28, 27, 27, { - { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 }, - { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, - { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 }, - { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 }, - { 27, 28, -12 } - } - }, -}, -{ - /* 12BPP/8BPC */ - { 341, 15, 2048, 3, 12, 11, 11, { - { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, - { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, - { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, - { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } - } - }, - /* 12BPP/10BPC */ - { 341, 15, 2048, 7, 16, 15, 15, { - { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 }, - { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, - { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, - { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } - } - }, - /* 12BPP/12BPC */ - { 341, 15, 2048, 11, 20, 19, 19, { - { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 }, - { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, - { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, - { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, - { 21, 23, -12 } - } - }, - /* 12BPP/14BPC */ - { 341, 15, 2048, 15, 24, 23, 23, { - { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 }, - { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, - { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 }, - { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 }, - { 22, 23, -12 } - } - }, - /* 12BPP/16BPC */ - { 341, 15, 2048, 19, 28, 27, 27, { - { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 }, - { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, - { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 }, - { 21, 25, -12 
}, { 21, 25, -12 }, { 23, 26, -12 }, - { 26, 27, -12 } - } - }, -}, -{ - /* 15BPP/8BPC */ - { 273, 15, 2048, 3, 12, 11, 11, { - { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 }, - { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 }, - { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 }, - { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 } - } - }, - /* 15BPP/10BPC */ - { 273, 15, 2048, 7, 16, 15, 15, { - { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 }, - { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 }, - { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 }, - { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 } - } - }, - /* 15BPP/12BPC */ - { 273, 15, 2048, 11, 20, 19, 19, { - { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 }, - { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 }, - { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 }, - { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 }, - { 16, 17, -12 } - } - }, - /* 15BPP/14BPC */ - { 273, 15, 2048, 15, 24, 23, 23, { - { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 }, - { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 }, - { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 }, - { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 }, - { 20, 21, -12 } - } - }, - /* 15BPP/16BPC */ - { 273, 15, 2048, 19, 28, 27, 27, { - { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 }, - { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 }, - { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 }, - { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 }, - { 24, 25, -12 } - } - } -} - -}; - -static int get_row_index_for_rc_params(u16 compressed_bpp) -{ - switch (compressed_bpp) { - case 6: - return ROW_INDEX_6BPP; - case 8: - return ROW_INDEX_8BPP; - case 10: - return ROW_INDEX_10BPP; - case 12: - return ROW_INDEX_12BPP; - case 15: - return ROW_INDEX_15BPP; - default: - return -EINVAL; - } -} - -static int get_column_index_for_rc_params(u8 bits_per_component) -{ - switch (bits_per_component) { - case 8: - return COLUMN_INDEX_8BPC; - case 10: - return COLUMN_INDEX_10BPC; - case 12: - return COLUMN_INDEX_12BPC; - case 14: - return COLUMN_INDEX_14BPC; - case 16: - return COLUMN_INDEX_16BPC; - default: - return -EINVAL; - } -} - -static const struct rc_parameters *get_rc_params(u16 compressed_bpp, - u8 bits_per_component) -{ - int row_index, column_index; - - row_index = get_row_index_for_rc_params(compressed_bpp); - if (row_index < 0) - return NULL; - - column_index = get_column_index_for_rc_params(bits_per_component); - if (column_index < 0) - return NULL; - - return &rc_parameters[row_index][column_index]; -} - bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) { const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -374,8 +53,7 @@ static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder) } static void -calculate_rc_params(struct rc_parameters *rc, - struct drm_dsc_config *vdsc_cfg) +calculate_rc_params(struct drm_dsc_config *vdsc_cfg) { int bpc = vdsc_cfg->bits_per_component; int bpp = vdsc_cfg->bits_per_pixel >> 4; @@ -395,56 +73,57 @@ calculate_rc_params(struct rc_parameters *rc, u32 res, buf_i, bpp_i; if (vdsc_cfg->slice_height >= 8) - rc->first_line_bpg_offset = + vdsc_cfg->first_line_bpg_offset = 12 + DIV_ROUND_UP((9 * min(34, vdsc_cfg->slice_height - 8)), 100); else - rc->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1); + vdsc_cfg->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1); /* Our hw supports only 444 modes as of today */ if (bpp >= 12) - 
rc->initial_offset = 2048; + vdsc_cfg->initial_offset = 2048; else if (bpp >= 10) - rc->initial_offset = 5632 - DIV_ROUND_UP(((bpp - 10) * 3584), 2); + vdsc_cfg->initial_offset = 5632 - DIV_ROUND_UP(((bpp - 10) * 3584), 2); else if (bpp >= 8) - rc->initial_offset = 6144 - DIV_ROUND_UP(((bpp - 8) * 512), 2); + vdsc_cfg->initial_offset = 6144 - DIV_ROUND_UP(((bpp - 8) * 512), 2); else - rc->initial_offset = 6144; + vdsc_cfg->initial_offset = 6144; /* initial_xmit_delay = rc_model_size/2/compression_bpp */ - rc->initial_xmit_delay = DIV_ROUND_UP(DSC_RC_MODEL_SIZE_CONST, 2 * bpp); + vdsc_cfg->initial_xmit_delay = DIV_ROUND_UP(DSC_RC_MODEL_SIZE_CONST, 2 * bpp); - rc->flatness_min_qp = 3 + qp_bpc_modifier; - rc->flatness_max_qp = 12 + qp_bpc_modifier; + vdsc_cfg->flatness_min_qp = 3 + qp_bpc_modifier; + vdsc_cfg->flatness_max_qp = 12 + qp_bpc_modifier; - rc->rc_quant_incr_limit0 = 11 + qp_bpc_modifier; - rc->rc_quant_incr_limit1 = 11 + qp_bpc_modifier; + vdsc_cfg->rc_quant_incr_limit0 = 11 + qp_bpc_modifier; + vdsc_cfg->rc_quant_incr_limit1 = 11 + qp_bpc_modifier; bpp_i = (2 * (bpp - 6)); for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { + u8 range_bpg_offset; + /* Read range_minqp and range_max_qp from qp tables */ - rc->rc_range_params[buf_i].range_min_qp = + vdsc_cfg->rc_range_params[buf_i].range_min_qp = intel_lookup_range_min_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420); - rc->rc_range_params[buf_i].range_max_qp = + vdsc_cfg->rc_range_params[buf_i].range_max_qp = intel_lookup_range_max_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420); - /* Calculate range_bgp_offset */ + /* Calculate range_bpg_offset */ if (bpp <= 6) { - rc->rc_range_params[buf_i].range_bpg_offset = ofs_und6[buf_i]; + range_bpg_offset = ofs_und6[buf_i]; } else if (bpp <= 8) { res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2); - rc->rc_range_params[buf_i].range_bpg_offset = - ofs_und6[buf_i] + res; + range_bpg_offset = ofs_und6[buf_i] + res; } else if (bpp <= 12) { - rc->rc_range_params[buf_i].range_bpg_offset = - ofs_und8[buf_i]; + range_bpg_offset = ofs_und8[buf_i]; } else if (bpp <= 15) { res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3); - rc->rc_range_params[buf_i].range_bpg_offset = - ofs_und12[buf_i] + res; + range_bpg_offset = ofs_und12[buf_i] + res; } else { - rc->rc_range_params[buf_i].range_bpg_offset = - ofs_und15[buf_i]; + range_bpg_offset = ofs_und15[buf_i]; } + + vdsc_cfg->rc_range_params[buf_i].range_bpg_offset = + range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; } } @@ -477,10 +156,8 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; u16 compressed_bpp = pipe_config->dsc.compressed_bpp; - const struct rc_parameters *rc_params; - struct rc_parameters *rc = NULL; int err; - u8 i = 0; + int ret; vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, @@ -539,23 +216,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; - for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { - /* - * six 0s are appended to the lsb of each threshold value - * internally in h/w. 
- * Only 8 bits are allowed for programming RcBufThreshold - */ - vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6; - } - - /* - * For 6bpp, RC Buffer threshold 12 and 13 need a different value - * as per C Model - */ - if (compressed_bpp == 6) { - vdsc_cfg->rc_buf_thresh[12] = 0x7C; - vdsc_cfg->rc_buf_thresh[13] = 0x7D; - } + drm_dsc_set_rc_buf_thresh(vdsc_cfg); /* * From XE_LPD onwards we supports compression bpps in steps of 1 @@ -563,39 +224,31 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) * parameters */ if (DISPLAY_VER(dev_priv) >= 13) { - rc = kmalloc(sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - calculate_rc_params(rc, vdsc_cfg); - rc_params = rc; + calculate_rc_params(vdsc_cfg); } else { - rc_params = get_rc_params(compressed_bpp, - vdsc_cfg->bits_per_component); - if (!rc_params) - return -EINVAL; - } + if ((compressed_bpp == 8 || + compressed_bpp == 12) && + (vdsc_cfg->bits_per_component == 8 || + vdsc_cfg->bits_per_component == 10 || + vdsc_cfg->bits_per_component == 12)) + ret = drm_dsc_setup_rc_params(vdsc_cfg, DRM_DSC_1_1_PRE_SCR); + else + ret = drm_dsc_setup_rc_params(vdsc_cfg, DRM_DSC_1_2_444); - vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset; - vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay; - vdsc_cfg->initial_offset = rc_params->initial_offset; - vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp; - vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp; - vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0; - vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1; + if (ret) + return ret; - for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { - vdsc_cfg->rc_range_params[i].range_min_qp = - rc_params->rc_range_params[i].range_min_qp; - vdsc_cfg->rc_range_params[i].range_max_qp = - rc_params->rc_range_params[i].range_max_qp; /* - * Range BPG Offset uses 2's complement and is only a 6 bits. So - * mask it to get only 6 bits. + * FIXME: verify that the hardware actually needs these + * modifications rather than them being simple typos. 
*/ - vdsc_cfg->rc_range_params[i].range_bpg_offset = - rc_params->rc_range_params[i].range_bpg_offset & - DSC_RANGE_BPG_OFFSET_MASK; + if (compressed_bpp == 6 && + vdsc_cfg->bits_per_component == 8) + vdsc_cfg->rc_quant_incr_limit1 = 23; + + if (compressed_bpp == 8 && + vdsc_cfg->bits_per_component == 14) + vdsc_cfg->rc_range_params[0].range_bpg_offset = 0; } /* @@ -612,8 +265,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); - kfree(rc); - return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 4228f26b4c11..88e4759b538b 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -114,9 +114,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) return; - if (!crtc_state->uapi.vrr_enabled) - return; - vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000, adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq); vmax = adjusted_mode->crtc_clock * 1000 / @@ -135,7 +132,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, */ crtc_state->vrr.vmin = vmin - 1; crtc_state->vrr.vmax = vmax; - crtc_state->vrr.enable = true; crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1; @@ -152,7 +148,10 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, crtc_state->framestart_delay - 1); } - crtc_state->mode_flags |= I915_MODE_FLAG_VRR; + if (crtc_state->uapi.vrr_enabled) { + crtc_state->vrr.enable = true; + crtc_state->mode_flags |= I915_MODE_FLAG_VRR; + } } static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state) @@ -168,23 +167,28 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state) VRR_CTL_PIPELINE_FULL_OVERRIDE; } -void intel_vrr_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (!crtc_state->vrr.enable) + /* + * TRANS_SET_CONTEXT_LATENCY with VRR enabled + * requires this chicken bit on ADL/DG2. 
+ */ + if (DISPLAY_VER(dev_priv) == 13) + intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), + 0, PIPE_VBLANK_WITH_DELAY); + + if (!crtc_state->vrr.flipline) { + intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0); return; + } intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1); intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1); intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl(crtc_state)); intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1); - intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN); - - intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), - VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); } void intel_vrr_send_push(const struct intel_crtc_state *crtc_state) @@ -212,6 +216,19 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND; } +void intel_vrr_enable(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (!crtc_state->vrr.enable) + return; + + intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN); + intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), + VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); +} + void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); @@ -225,22 +242,18 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) trans_vrr_ctl(old_crtc_state)); intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder), VRR_STATUS_VRR_EN_LIVE, 1000); - intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0); - intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0); } -void intel_vrr_get_config(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +void intel_vrr_get_config(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 trans_vrr_ctl; trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder)); + crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE; - if (!crtc_state->vrr.enable) - return; if (DISPLAY_VER(dev_priv) >= 13) crtc_state->vrr.guardband = @@ -249,10 +262,13 @@ void intel_vrr_get_config(struct intel_crtc *crtc, if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl); - if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) + + if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) { crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1; - crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1; - crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1; + crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1; + crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1; + } - crtc_state->mode_flags |= I915_MODE_FLAG_VRR; + if (crtc_state->vrr.enable) + crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 9fda1135b0dd..de16960c4929 100644 
--- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -11,22 +11,18 @@ struct drm_connector_state; struct intel_atomic_state; struct intel_connector; -struct intel_crtc; struct intel_crtc_state; -struct intel_dp; -struct intel_encoder; bool intel_vrr_is_capable(struct intel_connector *connector); void intel_vrr_check_modeset(struct intel_atomic_state *state); void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state); -void intel_vrr_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state); +void intel_vrr_enable(const struct intel_crtc_state *crtc_state); void intel_vrr_send_push(const struct intel_crtc_state *crtc_state); bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state); void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state); -void intel_vrr_get_config(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state); +void intel_vrr_get_config(struct intel_crtc_state *crtc_state); int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state); int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index 0e7e014fcc71..1e7c97243fcf 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -348,6 +348,263 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, return 0; } +static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, + int num_scalers_need, struct intel_crtc *intel_crtc, + const char *name, int idx, + struct intel_plane_state *plane_state, + int *scaler_id) +{ + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + int j; + u32 mode; + + if (*scaler_id < 0) { + /* find a free scaler */ + for (j = 0; j < intel_crtc->num_scalers; j++) { + if (scaler_state->scalers[j].in_use) + continue; + + *scaler_id = j; + scaler_state->scalers[*scaler_id].in_use = 1; + break; + } + } + + if (drm_WARN(&dev_priv->drm, *scaler_id < 0, + "Cannot find scaler for %s:%d\n", name, idx)) + return -EINVAL; + + /* set scaler mode */ + if (plane_state && plane_state->hw.fb && + plane_state->hw.fb->format->is_yuv && + plane_state->hw.fb->format->num_planes > 1) { + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + + if (DISPLAY_VER(dev_priv) == 9) { + mode = SKL_PS_SCALER_MODE_NV12; + } else if (icl_is_hdr_plane(dev_priv, plane->id)) { + /* + * On gen11+'s HDR planes we only use the scaler for + * scaling. They have a dedicated chroma upsampler, so + * we don't need the scaler to upsample the UV plane. + */ + mode = PS_SCALER_MODE_NORMAL; + } else { + struct intel_plane *linked = + plane_state->planar_linked_plane; + + mode = PS_SCALER_MODE_PLANAR; + + if (linked) + mode |= PS_BINDING_Y_PLANE(linked->id); + } + } else if (DISPLAY_VER(dev_priv) >= 10) { + mode = PS_SCALER_MODE_NORMAL; + } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) { + /* + * when only 1 scaler is in use on a pipe with 2 scalers + * scaler 0 operates in high quality (HQ) mode. 
+ * In this case use scaler 0 to take advantage of HQ mode + */ + scaler_state->scalers[*scaler_id].in_use = 0; + *scaler_id = 0; + scaler_state->scalers[0].in_use = 1; + mode = SKL_PS_SCALER_MODE_HQ; + } else { + mode = SKL_PS_SCALER_MODE_DYN; + } + + /* + * FIXME: we should also check the scaler factors for pfit, so + * this shouldn't be tied directly to planes. + */ + if (plane_state && plane_state->hw.fb) { + const struct drm_framebuffer *fb = plane_state->hw.fb; + const struct drm_rect *src = &plane_state->uapi.src; + const struct drm_rect *dst = &plane_state->uapi.dst; + int hscale, vscale, max_vscale, max_hscale; + + /* + * FIXME: When two scalers are needed, but only one of + * them needs to downscale, we should make sure that + * the one that needs downscaling support is assigned + * as the first scaler, so we don't reject downscaling + * unnecessarily. + */ + + if (DISPLAY_VER(dev_priv) >= 14) { + /* + * On versions 14 and up, only the first + * scaler supports a vertical scaling factor + * of more than 1.0, while a horizontal + * scaling factor of 3.0 is supported. + */ + max_hscale = 0x30000 - 1; + if (*scaler_id == 0) + max_vscale = 0x30000 - 1; + else + max_vscale = 0x10000; + + } else if (DISPLAY_VER(dev_priv) >= 10 || + !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { + max_hscale = 0x30000 - 1; + max_vscale = 0x30000 - 1; + } else { + max_hscale = 0x20000 - 1; + max_vscale = 0x20000 - 1; + } + + /* + * FIXME: We should change the if-else block above to + * support HQ vs dynamic scaler properly. + */ + + /* Check if required scaling is within limits */ + hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale); + vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale); + + if (hscale < 0 || vscale < 0) { + drm_dbg_kms(&dev_priv->drm, + "Scaler %d doesn't support required plane scaling\n", + *scaler_id); + drm_rect_debug_print("src: ", src, true); + drm_rect_debug_print("dst: ", dst, false); + + return -EINVAL; + } + } + + drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n", + intel_crtc->pipe, *scaler_id, name, idx); + scaler_state->scalers[*scaler_id].mode = mode; + + return 0; +} + +/** + * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests + * @dev_priv: i915 device + * @intel_crtc: intel crtc + * @crtc_state: incoming crtc_state to validate and setup scalers + * + * This function sets up scalers based on staged scaling requests for + * a @crtc and its planes. It is called from crtc level check path. If request + * is a supportable request, it attaches scalers to requested planes and crtc. 
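+ *
+ * A minimal usage sketch (the exact call site is in the crtc atomic check
+ * path; variable names here are illustrative):
+ *
+ *	ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ *	if (ret)
+ *		return ret;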
+ * + * This function takes into account the current scaler(s) in use by any planes + * not being part of this atomic state + * + * Returns: + * 0 - scalers were setup successfully + * error code - otherwise + */ +int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_crtc_state *crtc_state) +{ + struct drm_plane *plane = NULL; + struct intel_plane *intel_plane; + struct intel_plane_state *plane_state = NULL; + struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + struct drm_atomic_state *drm_state = crtc_state->uapi.state; + struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); + int num_scalers_need; + int i; + + num_scalers_need = hweight32(scaler_state->scaler_users); + + /* + * High level flow: + * - staged scaler requests are already in scaler_state->scaler_users + * - check whether staged scaling requests can be supported + * - add planes using scalers that aren't in current transaction + * - assign scalers to requested users + * - as part of plane commit, scalers will be committed + * (i.e., either attached or detached) to respective planes in hw + * - as part of crtc_commit, scaler will be either attached or detached + * to crtc in hw + */ + + /* fail if required scalers > available scalers */ + if (num_scalers_need > intel_crtc->num_scalers) { + drm_dbg_kms(&dev_priv->drm, + "Too many scaling requests %d > %d\n", + num_scalers_need, intel_crtc->num_scalers); + return -EINVAL; + } + + /* walkthrough scaler_users bits and start assigning scalers */ + for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { + int *scaler_id; + const char *name; + int idx, ret; + + /* skip if scaler not required */ + if (!(scaler_state->scaler_users & (1 << i))) + continue; + + if (i == SKL_CRTC_INDEX) { + name = "CRTC"; + idx = intel_crtc->base.base.id; + + /* panel fitter case: assign as a crtc scaler */ + scaler_id = &scaler_state->scaler_id; + } else { + name = "PLANE"; + + /* plane scaler case: assign as a plane scaler */ + /* find the plane that set the bit as scaler_user */ + plane = drm_state->planes[i].ptr; + + /* + * to enable/disable hq mode, add planes that are using scaler + * into this transaction + */ + if (!plane) { + struct drm_plane_state *state; + + /* + * GLK+ scalers don't have a HQ mode so it + * isn't necessary to change between HQ and dyn mode + * on those platforms. 
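+				 * This is why the DISPLAY_VER(dev_priv) >= 10
+				 * check below simply continues without adding
+				 * the plane to the state.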
+ */ + if (DISPLAY_VER(dev_priv) >= 10) + continue; + + plane = drm_plane_from_index(&dev_priv->drm, i); + state = drm_atomic_get_plane_state(drm_state, plane); + if (IS_ERR(state)) { + drm_dbg_kms(&dev_priv->drm, + "Failed to add [PLANE:%d] to drm_state\n", + plane->base.id); + return PTR_ERR(state); + } + } + + intel_plane = to_intel_plane(plane); + idx = plane->base.id; + + /* plane on different crtc cannot be a scaler user of this crtc */ + if (drm_WARN_ON(&dev_priv->drm, + intel_plane->pipe != intel_crtc->pipe)) + continue; + + plane_state = intel_atomic_get_new_plane_state(intel_state, + intel_plane); + scaler_id = &plane_state->scaler_id; + } + + ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need, + intel_crtc, name, idx, + plane_state, scaler_id); + if (ret < 0) + return ret; + } + + return 0; +} + static int glk_coef_tap(int i) { return i % 7; @@ -401,7 +658,7 @@ static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv, int i; intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), - PS_COEE_INDEX_AUTO_INC); + PS_COEF_INDEX_AUTO_INC); for (i = 0; i < 17 * 7; i += 2) { u32 tmp; @@ -484,8 +741,8 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state) id = scaler_state->scaler_id; - ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0); - ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode; + ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode | + skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0); skl_scaler_setup_filter(dev_priv, pipe, id, 0, crtc_state->hw.scaling_filter); @@ -497,9 +754,9 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state) intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), - x << 16 | y); + PS_WIN_XPOS(x) | PS_WIN_YPOS(y)); intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), - width << 16 | height); + PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height)); } void @@ -547,8 +804,8 @@ skl_program_plane_scaler(struct intel_plane *plane, uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); } - ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0); - ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode; + ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode | + skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0); skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0, plane_state->hw.scaling_filter); @@ -559,9 +816,9 @@ skl_program_plane_scaler(struct intel_plane *plane, intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id), PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id), - (crtc_x << 16) | crtc_y); + PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y)); intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id), - (crtc_w << 16) | crtc_h); + PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h)); } static void skl_detach_scaler(struct intel_crtc *crtc, int id) @@ -599,3 +856,42 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) for (i = 0; i < crtc->num_scalers; i++) skl_detach_scaler(crtc, i); } + +void skl_scaler_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; + int id = -1; + int i; + + /* find scaler 
attached to this pipe */ + for (i = 0; i < crtc->num_scalers; i++) { + u32 ctl, pos, size; + + ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); + if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE)) + continue; + + id = i; + crtc_state->pch_pfit.enabled = true; + + pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); + size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); + + drm_rect_init(&crtc_state->pch_pfit.dst, + REG_FIELD_GET(PS_WIN_XPOS_MASK, pos), + REG_FIELD_GET(PS_WIN_YPOS_MASK, pos), + REG_FIELD_GET(PS_WIN_XSIZE_MASK, size), + REG_FIELD_GET(PS_WIN_YSIZE_MASK, size)); + + scaler_state->scalers[i].in_use = true; + break; + } + + scaler_state->scaler_id = id; + if (id >= 0) + scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); + else + scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); +} diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h index 0097d5d08e10..63f93ca03c89 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.h +++ b/drivers/gpu/drm/i915/display/skl_scaler.h @@ -8,17 +8,22 @@ #include <linux/types.h> enum drm_scaling_filter; +enum pipe; struct drm_i915_private; +struct intel_crtc; struct intel_crtc_state; -struct intel_plane_state; struct intel_plane; -enum pipe; +struct intel_plane_state; int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); +int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_crtc_state *crtc_state); + void skl_pfit_enable(const struct intel_crtc_state *crtc_state); void skl_program_plane_scaler(struct intel_plane *plane, @@ -26,4 +31,7 @@ void skl_program_plane_scaler(struct intel_plane *plane, const struct intel_plane_state *plane_state); void skl_detach_scalers(const struct intel_crtc_state *crtc_state); void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); + +void skl_scaler_get_config(struct intel_crtc_state *crtc_state); + #endif diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 8ea0598a5a07..36070d86550f 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -9,10 +9,10 @@ #include <drm/drm_fourcc.h> #include "i915_drv.h" -#include "i915_irq.h" #include "i915_reg.h" #include "intel_atomic_plane.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fbc.h" @@ -789,6 +789,14 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier) PLANE_CTL_CLEAR_COLOR_DISABLE; case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC: return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS: + return PLANE_CTL_TILED_4 | + PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | + PLANE_CTL_CLEAR_COLOR_DISABLE; + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC: + return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; + case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: + return PLANE_CTL_TILED_4 | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; @@ -1936,7 +1944,7 @@ static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, enum intel_fbc_id 
fbc_id, enum plane_id plane_id) { - if ((RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0) + if ((DISPLAY_RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0) return false; return plane_id == PLANE_PRIMARY; @@ -2160,6 +2168,11 @@ skl_plane_disable_flip_done(struct intel_plane *plane) static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, enum pipe pipe, enum plane_id plane_id) { + /* Wa_14017240301 */ + if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) || + IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) + return false; + /* Wa_22011186057 */ if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) return false; @@ -2441,12 +2454,17 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, case PLANE_CTL_TILED_Y: plane_config->tiling = I915_TILING_Y; if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 14) + fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS; + else if (DISPLAY_VER(dev_priv) >= 12) fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) - fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; + if (DISPLAY_VER(dev_priv) >= 14) + fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS; + else + fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 1c7e6468f3e3..d1245c847f1c 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -507,8 +507,8 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, static int intel_dbuf_slice_size(struct drm_i915_private *i915) { - return INTEL_INFO(i915)->display.dbuf.size / - hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask); + return DISPLAY_INFO(i915)->dbuf.size / + hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask); } static void @@ -527,7 +527,7 @@ skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask, ddb->end = fls(slice_mask) * slice_size; WARN_ON(ddb->start >= ddb->end); - WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size); + WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size); } static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask) @@ -2625,7 +2625,7 @@ skl_compute_ddb(struct intel_atomic_state *state) "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? 
%s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, - INTEL_INFO(i915)->display.dbuf.slice_mask, + DISPLAY_INFO(i915)->dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus)); } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 61d008d4e5f1..cd90a30e04d8 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -280,6 +280,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, int ret; drm_dbg_kms(&dev_priv->drm, "\n"); + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; ret = intel_panel_compute_config(intel_connector, adjusted_mode); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index b697badbbe71..ae0a0b11bae3 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -598,7 +598,7 @@ static void assert_dsi_pll(struct drm_i915_private *i915, bool state) cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN; vlv_cck_put(i915); - I915_STATE_WARN(cur_state != state, + I915_STATE_WARN(i915, cur_state != state, "DSI PLL state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index acec6566b914..8f888d36f16d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "i915_reg.h" #include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_irq.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index b8a39c219b60..718cb2c80f79 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -7,7 +7,8 @@ #define __INTEL_GT_REGS__ #include "i915_reg_defs.h" -#include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */ + +#define VLV_GUNIT_BASE 0x180000 /* * The perf control registers are technically multicast registers, but the @@ -1469,7 +1470,7 @@ #define GEN12_RCU_MODE _MMIO(0x14800) #define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0) -#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168) +#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168) #define CHV_FGT_DISABLE_SS0 (1 << 10) #define CHV_FGT_DISABLE_SS1 (1 << 11) #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 797ea8340467..195ff72d7a14 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -7,7 +7,7 @@ #include <linux/stop_machine.h> #include <linux/string_helpers.h> -#include "display/intel_display.h" +#include "display/intel_display_reset.h" #include "display/intel_overlay.h" #include "gem/i915_gem_context.h" @@ -20,6 +20,7 @@ #include "i915_file_private.h" #include "i915_gpu_error.h" #include "i915_irq.h" +#include "i915_reg.h" #include "intel_breadcrumbs.h" #include "intel_engine_pm.h" #include "intel_engine_regs.h" @@ -1370,11 +1371,11 @@ static void intel_gt_reset_global(struct intel_gt *gt, /* Use a watchdog to ensure that our reset completes */ intel_wedge_on_timeout(&w, gt, 60 * HZ) { - intel_display_prepare_reset(gt->i915); + intel_display_reset_prepare(gt->i915); intel_gt_reset(gt, engine_mask, reason); - 
intel_display_finish_reset(gt->i915); + intel_display_reset_finish(gt->i915); } if (!test_bit(I915_WEDGED, &gt->reset.flags)) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index b2671ac59dc0..e68a99205599 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -8,8 +8,10 @@ #include <drm/i915_drm.h> #include "display/intel_display.h" +#include "display/intel_display_irq.h" #include "i915_drv.h" #include "i915_irq.h" +#include "i915_reg.h" #include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 84e77e8dbba1..fb30f733b036 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -8,6 +8,7 @@ #include "gem/i915_gem_internal.h" +#include "i915_reg.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" #include "intel_engine_regs.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index f9bddaa876d9..c9f20385f6a0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -16,6 +16,7 @@ #include "intel_guc_submission.h" #include "i915_drv.h" #include "i915_irq.h" +#include "i915_reg.h" /** * DOC: GuC diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 7c49a3d673a5..2a0438f12a14 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -463,10 +463,6 @@ static inline int get_aux_ch_reg(unsigned int offset) return reg; } -#define AUX_CTL_MSG_LENGTH(reg) \ - ((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \ - DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) - /** * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write * @vgpu: a vGPU @@ -495,7 +491,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, return; } - msg_length = AUX_CTL_MSG_LENGTH(value); + msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, reg); + // check the msg in DATA register. msg = vgpu_vreg(vgpu, offset + 4); addr = (msg >> 8) & 0xffff; @@ -510,8 +507,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 
2 : 1); vgpu_vreg(vgpu, offset) = DP_AUX_CH_CTL_DONE | - ((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) & - DP_AUX_CH_CTL_MESSAGE_SIZE_MASK); + DP_AUX_CH_CTL_MESSAGE_SIZE(ret_msg_size); if (msg_length == 3) { if (!(op & GVT_AUX_I2C_MOT)) { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 4b45a041ac5c..a9f7fa9b90bd 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1562,7 +1562,7 @@ static int pf_write(struct intel_vgpu *vgpu, if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || - offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) { + offset == _PS_1C_CTRL) && (val & PS_BINDING_MASK) != PS_BINDING_PIPE) { drm_WARN_ONCE(&i915->drm, true, "VM(%d): guest is trying to scaling a plane\n", vgpu->id); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e39937c82d73..76ccd4e03e31 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -50,6 +50,7 @@ #include "i915_debugfs_params.h" #include "i915_driver.h" #include "i915_irq.h" +#include "i915_reg.h" #include "i915_scheduler.h" #include "intel_mchbar_regs.h" diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 93fdc40d724f..8e92649124d4 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -48,6 +48,7 @@ #include "display/intel_acpi.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" +#include "display/intel_display_driver.h" #include "display/intel_display_types.h" #include "display/intel_dmc.h" #include "display/intel_dp.h" @@ -221,8 +222,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->display.audio.mutex); mutex_init(&dev_priv->display.wm.wm_mutex); mutex_init(&dev_priv->display.pps.mutex); - mutex_init(&dev_priv->display.hdcp.comp_mutex); - spin_lock_init(&dev_priv->display.dkl.phy_lock); + mutex_init(&dev_priv->display.hdcp.hdcp_mutex); i915_memcpy_init_early(dev_priv); intel_runtime_pm_init_early(&dev_priv->runtime_pm); @@ -251,7 +251,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_detect_pch(dev_priv); intel_irq_init(dev_priv); - intel_init_display_hooks(dev_priv); + intel_display_driver_early_probe(dev_priv); intel_clock_gating_hooks_init(dev_priv); intel_detect_preproduction_hw(dev_priv); @@ -720,8 +720,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct intel_device_info *match_info = (struct intel_device_info *)ent->driver_data; - struct intel_device_info *device_info; - struct intel_runtime_info *runtime; struct drm_i915_private *i915; i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, @@ -734,14 +732,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) /* Device parameters start as a copy of module parameters. */ i915_params_copy(&i915->params, &i915_modparams); - /* Setup the write-once "constant" device info */ - device_info = mkwrite_device_info(i915); - memcpy(device_info, match_info, sizeof(*device_info)); - - /* Initialize initial runtime info from static const data and pdev. */ - runtime = RUNTIME_INFO(i915); - memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime)); - runtime->device_id = pdev->device; + /* Set up device info and initial runtime info. 
*/ + intel_device_info_driver_create(i915, pdev->device, match_info); return i915; } @@ -752,7 +744,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) * @ent: matching PCI ID entry * * The driver probe routine has to do several things: - * - drive output discovery via intel_modeset_init() + * - drive output discovery via intel_display_driver_probe() * - initialize the memory manager * - allocate initial config memory * - setup the DRM framebuffer with the allocated memory @@ -762,13 +754,17 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_i915_private *i915; int ret; - i915 = i915_driver_create(pdev, ent); - if (IS_ERR(i915)) - return PTR_ERR(i915); - ret = pci_enable_device(pdev); - if (ret) - goto out_fini; + if (ret) { + pr_err("Failed to enable graphics device: %pe\n", ERR_PTR(ret)); + return ret; + } + + i915 = i915_driver_create(pdev, ent); + if (IS_ERR(i915)) { + ret = PTR_ERR(i915); + goto out_pci_disable; + } ret = i915_driver_early_probe(i915); if (ret < 0) @@ -790,7 +786,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_cleanup_mmio; - ret = intel_modeset_init_noirq(i915); + ret = intel_display_driver_probe_noirq(i915); if (ret < 0) goto out_cleanup_hw; @@ -798,7 +794,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_cleanup_modeset; - ret = intel_modeset_init_nogem(i915); + ret = intel_display_driver_probe_nogem(i915); if (ret) goto out_cleanup_irq; @@ -808,7 +804,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) intel_pxp_init(i915); - ret = intel_modeset_init(i915); + ret = intel_display_driver_probe(i915); if (ret) goto out_cleanup_gem; @@ -828,14 +824,14 @@ out_cleanup_gem: i915_gem_driver_release(i915); out_cleanup_modeset2: /* FIXME clean up the error path */ - intel_modeset_driver_remove(i915); + intel_display_driver_remove(i915); intel_irq_uninstall(i915); - intel_modeset_driver_remove_noirq(i915); + intel_display_driver_remove_noirq(i915); goto out_cleanup_modeset; out_cleanup_irq: intel_irq_uninstall(i915); out_cleanup_modeset: - intel_modeset_driver_remove_nogem(i915); + intel_display_driver_remove_nogem(i915); out_cleanup_hw: i915_driver_hw_remove(i915); intel_memory_regions_driver_release(i915); @@ -851,7 +847,6 @@ out_runtime_pm_put: i915_driver_late_release(i915); out_pci_disable: pci_disable_device(pdev); -out_fini: i915_probe_error(i915, "Device initialization failed (%d)\n", ret); return ret; } @@ -871,16 +866,16 @@ void i915_driver_remove(struct drm_i915_private *i915) intel_gvt_driver_remove(i915); - intel_modeset_driver_remove(i915); + intel_display_driver_remove(i915); intel_irq_uninstall(i915); - intel_modeset_driver_remove_noirq(i915); + intel_display_driver_remove_noirq(i915); i915_reset_error_state(i915); i915_gem_driver_remove(i915); - intel_modeset_driver_remove_nogem(i915); + intel_display_driver_remove_nogem(i915); i915_driver_hw_remove(i915); @@ -967,11 +962,19 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; + /* + * TODO: check and remove holding the modeset locks if none of + * the encoders depends on this. 
+ */ drm_modeset_lock_all(&dev_priv->drm); for_each_intel_encoder(&dev_priv->drm, encoder) if (encoder->suspend) encoder->suspend(encoder); drm_modeset_unlock_all(&dev_priv->drm); + + for_each_intel_encoder(&dev_priv->drm, encoder) + if (encoder->suspend_complete) + encoder->suspend_complete(encoder); } static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) @@ -981,11 +984,19 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; + /* + * TODO: check and remove holding the modeset locks if none of + * the encoders depends on this. + */ drm_modeset_lock_all(&dev_priv->drm); for_each_intel_encoder(&dev_priv->drm, encoder) if (encoder->shutdown) encoder->shutdown(encoder); drm_modeset_unlock_all(&dev_priv->drm); + + for_each_intel_encoder(&dev_priv->drm, encoder) + if (encoder->shutdown_complete) + encoder->shutdown_complete(encoder); } void i915_driver_shutdown(struct drm_i915_private *i915) @@ -1052,7 +1063,7 @@ static int i915_drm_prepare(struct drm_device *dev) intel_pxp_suspend_prepare(i915->pxp); /* - * NB intel_display_suspend() may issue new requests after we've + * NB intel_display_driver_suspend() may issue new requests after we've * ostensibly marked the GPU as ready-to-sleep here. We need to * split out that work and pull it forward so that after point, * the GPU is not woken again. @@ -1076,7 +1087,7 @@ static int i915_drm_suspend(struct drm_device *dev) pci_save_state(pdev); - intel_display_suspend(dev); + intel_display_driver_suspend(dev_priv); intel_dp_mst_suspend(dev_priv); @@ -1107,18 +1118,6 @@ static int i915_drm_suspend(struct drm_device *dev) return 0; } -static enum i915_drm_suspend_mode -get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) -{ - if (hibernate) - return I915_DRM_SUSPEND_HIBERNATE; - - if (suspend_to_idle(dev_priv)) - return I915_DRM_SUSPEND_IDLE; - - return I915_DRM_SUSPEND_MEM; -} - static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -1126,6 +1125,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct intel_gt *gt; int ret, i; + bool s2idle = !hibernation && suspend_to_idle(dev_priv); disable_rpm_wakeref_asserts(rpm); @@ -1136,8 +1136,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) for_each_gt(gt, dev_priv, i) intel_uncore_suspend(gt->uncore); - intel_power_domains_suspend(dev_priv, - get_suspend_mode(dev_priv, hibernation)); + intel_power_domains_suspend(dev_priv, s2idle); intel_display_power_suspend_late(dev_priv); @@ -1233,7 +1232,7 @@ static int i915_drm_resume(struct drm_device *dev) * * drm_mode_config_reset() needs AUX interrupts. * - * Modeset enabling in intel_modeset_init_hw() also needs working + * Modeset enabling in intel_display_driver_init_hw() also needs working * interrupts. 
*/ intel_runtime_pm_enable_interrupts(dev_priv); @@ -1243,13 +1242,14 @@ static int i915_drm_resume(struct drm_device *dev) i915_gem_resume(dev_priv); - intel_modeset_init_hw(dev_priv); + intel_display_driver_init_hw(dev_priv); + intel_clock_gating_init(dev_priv); intel_hpd_init(dev_priv); /* MST sideband requires HPD interrupts enabled */ intel_dp_mst_resume(dev_priv); - intel_display_resume(dev); + intel_display_driver_resume(dev_priv); intel_hpd_poll_disable(dev_priv); if (HAS_DISPLAY(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f23b030aaf09..f1205ed3ba71 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -205,6 +205,7 @@ struct drm_i915_private { const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ + struct intel_display_runtime_info __display_runtime; /* Access with DISPLAY_RUNTIME_INFO() */ struct intel_driver_caps caps; struct i915_dsm dsm; @@ -408,7 +409,9 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) #define INTEL_INFO(i915) (&(i915)->__info) +#define DISPLAY_INFO(i915) (INTEL_INFO(i915)->display) #define RUNTIME_INFO(i915) (&(i915)->__runtime) +#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->__display_runtime) #define DRIVER_CAPS(i915) (&(i915)->caps) #define INTEL_DEVID(i915) (RUNTIME_INFO(i915)->device_id) @@ -427,7 +430,7 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) #define IS_MEDIA_VER(i915, from, until) \ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) -#define DISPLAY_VER(i915) (RUNTIME_INFO(i915)->display.ip.ver) +#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver) #define IS_DISPLAY_VER(i915, from, until) \ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) @@ -782,10 +785,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, ((sizes) & ~RUNTIME_INFO(i915)->page_sizes) == 0; \ }) -#define HAS_OVERLAY(i915) (INTEL_INFO(i915)->display.has_overlay) -#define OVERLAY_NEEDS_PHYSICAL(i915) \ - (INTEL_INFO(i915)->display.overlay_needs_physical) - /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(i915) (IS_I830(i915) || IS_I845G(i915)) @@ -796,41 +795,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define NEEDS_WaRsDisableCoarsePowerGating(i915) \ (IS_SKL_GT3(i915) || IS_SKL_GT4(i915)) -#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4) -#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 11 || \ - IS_GEMINILAKE(i915) || \ - IS_KABYLAKE(i915)) - /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
*/ #define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \ !(IS_I915G(i915) || IS_I915GM(i915))) -#define SUPPORTS_TV(i915) (INTEL_INFO(i915)->display.supports_tv) -#define I915_HAS_HOTPLUG(i915) (INTEL_INFO(i915)->display.has_hotplug) - -#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2) -#define HAS_FBC(i915) (RUNTIME_INFO(i915)->fbc_mask != 0) -#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && DISPLAY_VER(i915) >= 7) - -#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13) - -#define HAS_IPS(i915) (IS_HSW_ULT(i915) || IS_BROADWELL(i915)) - -#define HAS_DP_MST(i915) (INTEL_INFO(i915)->display.has_dp_mst) -#define HAS_DP20(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14) - -#define HAS_DOUBLE_BUFFERED_M_N(i915) (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915)) - -#define HAS_CDCLK_CRAWL(i915) (INTEL_INFO(i915)->display.has_cdclk_crawl) -#define HAS_CDCLK_SQUASH(i915) (INTEL_INFO(i915)->display.has_cdclk_squash) -#define HAS_DDI(i915) (INTEL_INFO(i915)->display.has_ddi) -#define HAS_FPGA_DBG_UNCLAIMED(i915) (INTEL_INFO(i915)->display.has_fpga_dbg) -#define HAS_PSR(i915) (INTEL_INFO(i915)->display.has_psr) -#define HAS_PSR_HW_TRACKING(i915) \ - (INTEL_INFO(i915)->display.has_psr_hw_tracking) -#define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12) -#define HAS_TRANSCODER(i915, trans) ((RUNTIME_INFO(i915)->cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(i915) (INTEL_INFO(i915)->has_rc6) #define HAS_RC6p(i915) (INTEL_INFO(i915)->has_rc6p) @@ -838,11 +807,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps) -#define HAS_DMC(i915) (RUNTIME_INFO(i915)->has_dmc) -#define HAS_DSB(i915) (INTEL_INFO(i915)->display.has_dsb) -#define HAS_DSC(__i915) (RUNTIME_INFO(__i915)->has_dsc) -#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915)) - #define HAS_HECI_PXP(i915) \ (INTEL_INFO(i915)->has_heci_pxp) @@ -851,8 +815,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_HECI_GSC(i915) (HAS_HECI_PXP(i915) || HAS_HECI_GSCFI(i915)) -#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12) - #define HAS_RUNTIME_PM(i915) (INTEL_INFO(i915)->has_runtime_pm) #define HAS_64BIT_RELOC(i915) (INTEL_INFO(i915)->has_64bit_reloc) @@ -869,9 +831,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, */ #define HAS_64K_PAGES(i915) (INTEL_INFO(i915)->has_64k_pages) -#define HAS_IPC(i915) (INTEL_INFO(i915)->display.has_ipc) -#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_LP(i915)) - #define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) @@ -889,12 +848,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GLOBAL_MOCS_REGISTERS(i915) (INTEL_INFO(i915)->has_global_mocs) -#define HAS_GMCH(i915) (INTEL_INFO(i915)->display.has_gmch) - #define HAS_GMD_ID(i915) (INTEL_INFO(i915)->has_gmd_id) -#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10)) - #define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read) /* DPF == dynamic parity feature */ @@ -902,14 +857,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define NUM_L3_SLICES(i915) (IS_HSW_GT3(i915) ? 
\ 2 : HAS_L3_DPF(i915)) -#define INTEL_NUM_PIPES(i915) (hweight8(RUNTIME_INFO(i915)->pipe_mask)) - -#define HAS_DISPLAY(i915) (RUNTIME_INFO(i915)->pipe_mask != 0) - -#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) - -#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) - /* Only valid when HAS_DISPLAY() is true */ #define INTEL_DISPLAY_ENABLED(i915) \ (drm_WARN_ON(&(i915)->drm, !HAS_DISPLAY(i915)), \ @@ -919,11 +866,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GUC_DEPRIVILEGE(i915) \ (INTEL_INFO(i915)->has_guc_deprivilege) -#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || \ - IS_ALDERLAKE_S(i915)) - -#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) - #define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline) #define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit) @@ -931,11 +873,4 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_LMEMBAR_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \ GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) -/* intel_device_info.c */ -static inline struct intel_device_info * -mkwrite_device_info(struct drm_i915_private *dev_priv) -{ - return (struct intel_device_info *)INTEL_INFO(dev_priv); -} - #endif diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index a3bdd9f68a45..975da8e7f2a9 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -269,7 +269,7 @@ static const struct attribute_group *hwm_groups[] = { NULL }; -static const struct hwmon_channel_info *hwm_info[] = { +static const struct hwmon_channel_info * const hwm_info[] = { HWMON_CHANNEL_INFO(in, HWMON_I_INPUT), HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT), HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT), @@ -277,7 +277,7 @@ static const struct hwmon_channel_info *hwm_info[] = { NULL }; -static const struct hwmon_channel_info *hwm_gt_info[] = { +static const struct hwmon_channel_info * const hwm_gt_info[] = { HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT), NULL }; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 53c83e257055..82fbabcdd7a5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -33,15 +33,11 @@ #include <drm/drm_drv.h> -#include "display/icl_dsi_regs.h" -#include "display/intel_de.h" -#include "display/intel_display_trace.h" +#include "display/intel_display_irq.h" #include "display/intel_display_types.h" -#include "display/intel_fdi_regs.h" -#include "display/intel_fifo_underrun.h" #include "display/intel_hotplug.h" +#include "display/intel_hotplug_irq.h" #include "display/intel_lpe_audio.h" -#include "display/intel_psr.h" #include "display/intel_psr_regs.h" #include "gt/intel_breadcrumbs.h" @@ -54,6 +50,7 @@ #include "i915_driver.h" #include "i915_drv.h" #include "i915_irq.h" +#include "i915_reg.h" /** * DOC: interrupt handling @@ -81,159 +78,6 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915, WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1); } -typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); -typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder); - -static const u32 hpd_ilk[HPD_NUM_PINS] = { - [HPD_PORT_A] = DE_DP_A_HOTPLUG, -}; - -static const u32 hpd_ivb[HPD_NUM_PINS] = { - [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, -}; - -static const u32 hpd_bdw[HPD_NUM_PINS] = { - [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), -}; - -static const u32 hpd_ibx[HPD_NUM_PINS] = { - [HPD_CRT] = 
SDE_CRT_HOTPLUG, - [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, - [HPD_PORT_B] = SDE_PORTB_HOTPLUG, - [HPD_PORT_C] = SDE_PORTC_HOTPLUG, - [HPD_PORT_D] = SDE_PORTD_HOTPLUG, -}; - -static const u32 hpd_cpt[HPD_NUM_PINS] = { - [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, - [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, - [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, - [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, - [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, -}; - -static const u32 hpd_spt[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, - [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, - [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, - [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, - [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT, -}; - -static const u32 hpd_mask_i915[HPD_NUM_PINS] = { - [HPD_CRT] = CRT_HOTPLUG_INT_EN, - [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, - [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, - [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, - [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, - [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN, -}; - -static const u32 hpd_status_g4x[HPD_NUM_PINS] = { - [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, - [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, - [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, - [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, - [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, - [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, -}; - -static const u32 hpd_status_i915[HPD_NUM_PINS] = { - [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, - [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, - [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, - [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, - [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, - [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, -}; - -static const u32 hpd_bxt[HPD_NUM_PINS] = { - [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), - [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B), - [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C), -}; - -static const u32 hpd_gen11[HPD_NUM_PINS] = { - [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1), - [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2), - [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3), - [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4), - [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5), - [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6), -}; - -static const u32 hpd_icp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), - [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), - [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), - [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), - [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2), - [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3), - [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), - [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5), - [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6), -}; - -static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), - [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), - [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), - [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D), - [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1), -}; - -static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) -{ - struct intel_hotplug *hpd = &dev_priv->display.hotplug; - - if (HAS_GMCH(dev_priv)) { - if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv)) - hpd->hpd = hpd_status_g4x; - else - hpd->hpd = hpd_status_i915; - return; - } - - if (DISPLAY_VER(dev_priv) >= 11) - hpd->hpd = 
hpd_gen11; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - hpd->hpd = hpd_bxt; - else if (DISPLAY_VER(dev_priv) == 9) - hpd->hpd = NULL; /* no north HPD on SKL */ - else if (DISPLAY_VER(dev_priv) >= 8) - hpd->hpd = hpd_bdw; - else if (DISPLAY_VER(dev_priv) >= 7) - hpd->hpd = hpd_ivb; - else - hpd->hpd = hpd_ilk; - - if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) && - (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))) - return; - - if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) - hpd->pch_hpd = hpd_sde_dg1; - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - hpd->pch_hpd = hpd_icp; - else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) - hpd->pch_hpd = hpd_spt; - else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv)) - hpd->pch_hpd = hpd_cpt; - else if (HAS_PCH_IBX(dev_priv)) - hpd->pch_hpd = hpd_ibx; - else - MISSING_CASE(INTEL_PCH_TYPE(dev_priv)); -} - -static void -intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); - - drm_crtc_handle_vblank(&crtc->base); -} - void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, i915_reg_t iir, i915_reg_t ier) { @@ -266,7 +110,7 @@ static void gen2_irq_reset(struct intel_uncore *uncore) /* * We should clear IMR at preinstall/uninstall, and just check at postinstall. */ -static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) +void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) { u32 val = intel_uncore_read(uncore, reg); @@ -320,302 +164,6 @@ static void gen2_irq_init(struct intel_uncore *uncore, intel_uncore_posting_read16(uncore, GEN2_IMR); } -/* For display hotplug interrupt */ -static inline void -i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, - u32 mask, - u32 bits) -{ - lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, bits & ~mask); - - intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits); -} - -/** - * i915_hotplug_interrupt_update - update hotplug interrupt enable - * @dev_priv: driver private - * @mask: bits to update - * @bits: bits to enable - * NOTE: the HPD enable bits are modified both inside and outside - * of an interrupt context. To avoid that read-modify-write cycles - * interfer, these bits are protected by a spinlock. Since this - * function is usually not called from a context where the lock is - * held already, this function acquires the lock itself. A non-locking - * version is also available. 
- */ -void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, - u32 mask, - u32 bits) -{ - spin_lock_irq(&dev_priv->irq_lock); - i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); - spin_unlock_irq(&dev_priv->irq_lock); -} - -/** - * ilk_update_display_irq - update DEIMR - * @dev_priv: driver private - * @interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - */ -static void ilk_update_display_irq(struct drm_i915_private *dev_priv, - u32 interrupt_mask, u32 enabled_irq_mask) -{ - u32 new_val; - - lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - - new_val = dev_priv->irq_mask; - new_val &= ~interrupt_mask; - new_val |= (~enabled_irq_mask & interrupt_mask); - - if (new_val != dev_priv->irq_mask && - !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { - dev_priv->irq_mask = new_val; - intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); - intel_uncore_posting_read(&dev_priv->uncore, DEIMR); - } -} - -void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) -{ - ilk_update_display_irq(i915, bits, bits); -} - -void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) -{ - ilk_update_display_irq(i915, bits, 0); -} - -/** - * bdw_update_port_irq - update DE port interrupt - * @dev_priv: driver private - * @interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - */ -static void bdw_update_port_irq(struct drm_i915_private *dev_priv, - u32 interrupt_mask, - u32 enabled_irq_mask) -{ - u32 new_val; - u32 old_val; - - lockdep_assert_held(&dev_priv->irq_lock); - - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) - return; - - old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); - - new_val = old_val; - new_val &= ~interrupt_mask; - new_val |= (~enabled_irq_mask & interrupt_mask); - - if (new_val != old_val) { - intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); - intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); - } -} - -/** - * bdw_update_pipe_irq - update DE pipe interrupt - * @dev_priv: driver private - * @pipe: pipe whose interrupt to update - * @interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - */ -static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 interrupt_mask, - u32 enabled_irq_mask) -{ - u32 new_val; - - lockdep_assert_held(&dev_priv->irq_lock); - - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) - return; - - new_val = dev_priv->de_irq_mask[pipe]; - new_val &= ~interrupt_mask; - new_val |= (~enabled_irq_mask & interrupt_mask); - - if (new_val != dev_priv->de_irq_mask[pipe]) { - dev_priv->de_irq_mask[pipe] = new_val; - intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); - intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); - } -} - -void bdw_enable_pipe_irq(struct drm_i915_private *i915, - enum pipe pipe, u32 bits) -{ - bdw_update_pipe_irq(i915, pipe, bits, bits); -} - -void bdw_disable_pipe_irq(struct drm_i915_private *i915, - enum pipe pipe, u32 bits) -{ - bdw_update_pipe_irq(i915, pipe, bits, 0); -} - -/** - * ibx_display_interrupt_update - update SDEIMR - * @dev_priv: driver private - * 
@interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - */ -static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - u32 interrupt_mask, - u32 enabled_irq_mask) -{ - u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); - sdeimr &= ~interrupt_mask; - sdeimr |= (~enabled_irq_mask & interrupt_mask); - - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - - lockdep_assert_held(&dev_priv->irq_lock); - - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) - return; - - intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); - intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); -} - -void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) -{ - ibx_display_interrupt_update(i915, bits, bits); -} - -void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) -{ - ibx_display_interrupt_update(i915, bits, 0); -} - -u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; - u32 enable_mask = status_mask << 16; - - lockdep_assert_held(&dev_priv->irq_lock); - - if (DISPLAY_VER(dev_priv) < 5) - goto out; - - /* - * On pipe A we don't support the PSR interrupt yet, - * on pipe B and C the same bit MBZ. - */ - if (drm_WARN_ON_ONCE(&dev_priv->drm, - status_mask & PIPE_A_PSR_STATUS_VLV)) - return 0; - /* - * On pipe B and C we don't support the PSR interrupt yet, on pipe - * A the same bit is for perf counters which we don't use either. - */ - if (drm_WARN_ON_ONCE(&dev_priv->drm, - status_mask & PIPE_B_PSR_STATUS_VLV)) - return 0; - - enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | - SPRITE0_FLIP_DONE_INT_EN_VLV | - SPRITE1_FLIP_DONE_INT_EN_VLV); - if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) - enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; - if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) - enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; - -out: - drm_WARN_ONCE(&dev_priv->drm, - enable_mask & ~PIPESTAT_INT_ENABLE_MASK || - status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", - pipe_name(pipe), enable_mask, status_mask); - - return enable_mask; -} - -void i915_enable_pipestat(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 status_mask) -{ - i915_reg_t reg = PIPESTAT(pipe); - u32 enable_mask; - - drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: status_mask=0x%x\n", - pipe_name(pipe), status_mask); - - lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); - - if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) - return; - - dev_priv->pipestat_irq_mask[pipe] |= status_mask; - enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); - - intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); - intel_uncore_posting_read(&dev_priv->uncore, reg); -} - -void i915_disable_pipestat(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 status_mask) -{ - i915_reg_t reg = PIPESTAT(pipe); - u32 enable_mask; - - drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: status_mask=0x%x\n", - pipe_name(pipe), status_mask); - - lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); - - if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) - return; - - dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; - enable_mask = i915_pipestat_enable_mask(dev_priv, 
pipe); - - intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); - intel_uncore_posting_read(&dev_priv->uncore, reg); -} - -static bool i915_has_asle(struct drm_i915_private *dev_priv) -{ - if (!dev_priv->display.opregion.asle) - return false; - - return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); -} - -/** - * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion - * @dev_priv: i915 device private - */ -static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) -{ - if (!i915_has_asle(dev_priv)) - return; - - spin_lock_irq(&dev_priv->irq_lock); - - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); - if (DISPLAY_VER(dev_priv) >= 4) - i915_enable_pipestat(dev_priv, PIPE_A, - PIPE_LEGACY_BLC_EVENT_STATUS); - - spin_unlock_irq(&dev_priv->irq_lock); -} - /** * ivb_parity_work - Workqueue called when a parity error interrupt * occurred. @@ -700,544 +248,6 @@ out: mutex_unlock(&dev_priv->drm.struct_mutex); } -static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_TC1: - case HPD_PORT_TC2: - case HPD_PORT_TC3: - case HPD_PORT_TC4: - case HPD_PORT_TC5: - case HPD_PORT_TC6: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin); - default: - return false; - } -} - -static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_A: - return val & PORTA_HOTPLUG_LONG_DETECT; - case HPD_PORT_B: - return val & PORTB_HOTPLUG_LONG_DETECT; - case HPD_PORT_C: - return val & PORTC_HOTPLUG_LONG_DETECT; - default: - return false; - } -} - -static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_A: - case HPD_PORT_B: - case HPD_PORT_C: - case HPD_PORT_D: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin); - default: - return false; - } -} - -static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_TC1: - case HPD_PORT_TC2: - case HPD_PORT_TC3: - case HPD_PORT_TC4: - case HPD_PORT_TC5: - case HPD_PORT_TC6: - return val & ICP_TC_HPD_LONG_DETECT(pin); - default: - return false; - } -} - -static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_E: - return val & PORTE_HOTPLUG_LONG_DETECT; - default: - return false; - } -} - -static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_A: - return val & PORTA_HOTPLUG_LONG_DETECT; - case HPD_PORT_B: - return val & PORTB_HOTPLUG_LONG_DETECT; - case HPD_PORT_C: - return val & PORTC_HOTPLUG_LONG_DETECT; - case HPD_PORT_D: - return val & PORTD_HOTPLUG_LONG_DETECT; - default: - return false; - } -} - -static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_A: - return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; - default: - return false; - } -} - -static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_B: - return val & PORTB_HOTPLUG_LONG_DETECT; - case HPD_PORT_C: - return val & PORTC_HOTPLUG_LONG_DETECT; - case HPD_PORT_D: - return val & PORTD_HOTPLUG_LONG_DETECT; - default: - return false; - } -} - -static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_B: - return val & PORTB_HOTPLUG_INT_LONG_PULSE; - case HPD_PORT_C: - return val & PORTC_HOTPLUG_INT_LONG_PULSE; - case HPD_PORT_D: - return val & PORTD_HOTPLUG_INT_LONG_PULSE; - default: - return false; - } -} - -/* - * Get a bit mask of pins 
that have triggered, and which ones may be long. - * This can be called multiple times with the same masks to accumulate - * hotplug detection results from several registers. - * - * Note that the caller is expected to zero out the masks initially. - */ -static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, - u32 *pin_mask, u32 *long_mask, - u32 hotplug_trigger, u32 dig_hotplug_reg, - const u32 hpd[HPD_NUM_PINS], - bool long_pulse_detect(enum hpd_pin pin, u32 val)) -{ - enum hpd_pin pin; - - BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS); - - for_each_hpd_pin(pin) { - if ((hpd[pin] & hotplug_trigger) == 0) - continue; - - *pin_mask |= BIT(pin); - - if (long_pulse_detect(pin, dig_hotplug_reg)) - *long_mask |= BIT(pin); - } - - drm_dbg(&dev_priv->drm, - "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", - hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); - -} - -static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, - const u32 hpd[HPD_NUM_PINS]) -{ - struct intel_encoder *encoder; - u32 enabled_irqs = 0; - - for_each_intel_encoder(&dev_priv->drm, encoder) - if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) - enabled_irqs |= hpd[encoder->hpd_pin]; - - return enabled_irqs; -} - -static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, - const u32 hpd[HPD_NUM_PINS]) -{ - struct intel_encoder *encoder; - u32 hotplug_irqs = 0; - - for_each_intel_encoder(&dev_priv->drm, encoder) - hotplug_irqs |= hpd[encoder->hpd_pin]; - - return hotplug_irqs; -} - -static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, - hotplug_enables_func hotplug_enables) -{ - struct intel_encoder *encoder; - u32 hotplug = 0; - - for_each_intel_encoder(&i915->drm, encoder) - hotplug |= hotplug_enables(encoder); - - return hotplug; -} - -static void gmbus_irq_handler(struct drm_i915_private *dev_priv) -{ - wake_up_all(&dev_priv->display.gmbus.wait_queue); -} - -static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) -{ - wake_up_all(&dev_priv->display.gmbus.wait_queue); -} - -#if defined(CONFIG_DEBUG_FS) -static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, - enum pipe pipe, - u32 crc0, u32 crc1, - u32 crc2, u32 crc3, - u32 crc4) -{ - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); - struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; - u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; - - trace_intel_pipe_crc(crtc, crcs); - - spin_lock(&pipe_crc->lock); - /* - * For some not yet identified reason, the first CRC is - * bonkers. So let's just wait for the next vblank and read - * out the buggy result. - * - * On GEN8+ sometimes the second CRC is bonkers as well, so - * don't trust that one either. 
- */ - if (pipe_crc->skipped <= 0 || - (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { - pipe_crc->skipped++; - spin_unlock(&pipe_crc->lock); - return; - } - spin_unlock(&pipe_crc->lock); - - drm_crtc_add_crc_entry(&crtc->base, true, - drm_crtc_accurate_vblank_count(&crtc->base), - crcs); -} -#else -static inline void -display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, - enum pipe pipe, - u32 crc0, u32 crc1, - u32 crc2, u32 crc3, - u32 crc4) {} -#endif - -static void flip_done_handler(struct drm_i915_private *i915, - enum pipe pipe) -{ - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); - struct drm_crtc_state *crtc_state = crtc->base.state; - struct drm_pending_vblank_event *e = crtc_state->event; - struct drm_device *dev = &i915->drm; - unsigned long irqflags; - - spin_lock_irqsave(&dev->event_lock, irqflags); - - crtc_state->event = NULL; - - drm_crtc_send_vblank_event(&crtc->base, e); - - spin_unlock_irqrestore(&dev->event_lock, irqflags); -} - -static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - display_pipe_crc_irq_handler(dev_priv, pipe, - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), - 0, 0, 0, 0); -} - -static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - display_pipe_crc_irq_handler(dev_priv, pipe, - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)), - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); -} - -static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - u32 res1, res2; - - if (DISPLAY_VER(dev_priv) >= 3) - res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)); - else - res1 = 0; - - if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) - res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)); - else - res2 = 0; - - display_pipe_crc_irq_handler(dev_priv, pipe, - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)), - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)), - intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)), - res1, res2); -} - -static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe), - PIPESTAT_INT_STATUS_MASK | - PIPE_FIFO_UNDERRUN_STATUS); - - dev_priv->pipestat_irq_mask[pipe] = 0; - } -} - -static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, - u32 iir, u32 pipe_stats[I915_MAX_PIPES]) -{ - enum pipe pipe; - - spin_lock(&dev_priv->irq_lock); - - if (!dev_priv->display_irqs_enabled) { - spin_unlock(&dev_priv->irq_lock); - return; - } - - for_each_pipe(dev_priv, pipe) { - i915_reg_t reg; - u32 status_mask, enable_mask, iir_bit = 0; - - /* - * PIPESTAT bits get signalled even when the interrupt is - * disabled with the mask bits, and some of the status bits do - * not generate interrupts at all (like the underrun bit). Hence - * we need to be careful that we only handle what we want to - * handle. - */ - - /* fifo underruns are filterered in the underrun handler. 
*/ - status_mask = PIPE_FIFO_UNDERRUN_STATUS; - - switch (pipe) { - default: - case PIPE_A: - iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; - break; - case PIPE_B: - iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; - break; - case PIPE_C: - iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; - break; - } - if (iir & iir_bit) - status_mask |= dev_priv->pipestat_irq_mask[pipe]; - - if (!status_mask) - continue; - - reg = PIPESTAT(pipe); - pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask; - enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); - - /* - * Clear the PIPE*STAT regs before the IIR - * - * Toggle the enable bits to make sure we get an - * edge in the ISR pipe event bit if we don't clear - * all the enabled status bits. Otherwise the edge - * triggered IIR on i965/g4x wouldn't notice that - * an interrupt is still pending. - */ - if (pipe_stats[pipe]) { - intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]); - intel_uncore_write(&dev_priv->uncore, reg, enable_mask); - } - } - spin_unlock(&dev_priv->irq_lock); -} - -static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, - u16 iir, u32 pipe_stats[I915_MAX_PIPES]) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - } -} - -static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, - u32 iir, u32 pipe_stats[I915_MAX_PIPES]) -{ - bool blc_event = false; - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) - blc_event = true; - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - } - - if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev_priv); -} - -static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, - u32 iir, u32 pipe_stats[I915_MAX_PIPES]) -{ - bool blc_event = false; - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) - blc_event = true; - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - } - - if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev_priv); - - if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev_priv); -} - -static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, - u32 pipe_stats[I915_MAX_PIPES]) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); - - if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) - flip_done_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - 
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - } - - if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev_priv); -} - -static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) -{ - u32 hotplug_status = 0, hotplug_status_mask; - int i; - - if (IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | - DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; - else - hotplug_status_mask = HOTPLUG_INT_STATUS_I915; - - /* - * We absolutely have to clear all the pending interrupt - * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port - * interrupt bit won't have an edge, and the i965/g4x - * edge triggered IIR will not notice that an interrupt - * is still pending. We can't use PORT_HOTPLUG_EN to - * guarantee the edge as the act of toggling the enable - * bits can itself generate a new hotplug interrupt :( - */ - for (i = 0; i < 10; i++) { - u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask; - - if (tmp == 0) - return hotplug_status; - - hotplug_status |= tmp; - intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status); - } - - drm_WARN_ONCE(&dev_priv->drm, 1, - "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", - intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); - - return hotplug_status; -} - -static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, - u32 hotplug_status) -{ - u32 pin_mask = 0, long_mask = 0; - u32 hotplug_trigger; - - if (IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; - else - hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - - if (hotplug_trigger) { - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - hotplug_trigger, hotplug_trigger, - dev_priv->display.hotplug.hpd, - i9xx_port_hotplug_long_detect); - - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); - } - - if ((IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - dp_aux_irq_handler(dev_priv); -} - static irqreturn_t valleyview_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; @@ -1402,338 +412,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) return ret; } -static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, - u32 hotplug_trigger) -{ - u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - - /* - * Somehow the PCH doesn't seem to really ack the interrupt to the CPU - * unless we touch the hotplug register, even if hotplug_trigger is - * zero. Not acking leads to "The master control interrupt lied (SDE)!" - * errors. 
- */ - dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); - if (!hotplug_trigger) { - u32 mask = PORTA_HOTPLUG_STATUS_MASK | - PORTD_HOTPLUG_STATUS_MASK | - PORTC_HOTPLUG_STATUS_MASK | - PORTB_HOTPLUG_STATUS_MASK; - dig_hotplug_reg &= ~mask; - } - - intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); - if (!hotplug_trigger) - return; - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, - pch_port_hotplug_long_detect); - - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); -} - -static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) -{ - enum pipe pipe; - u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - - ibx_hpd_irq_handler(dev_priv, hotplug_trigger); - - if (pch_iir & SDE_AUDIO_POWER_MASK) { - int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> - SDE_AUDIO_POWER_SHIFT); - drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", - port_name(port)); - } - - if (pch_iir & SDE_AUX_MASK) - dp_aux_irq_handler(dev_priv); - - if (pch_iir & SDE_GMBUS) - gmbus_irq_handler(dev_priv); - - if (pch_iir & SDE_AUDIO_HDCP_MASK) - drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); - - if (pch_iir & SDE_AUDIO_TRANS_MASK) - drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); - - if (pch_iir & SDE_POISON) - drm_err(&dev_priv->drm, "PCH poison interrupt\n"); - - if (pch_iir & SDE_FDI_MASK) { - for_each_pipe(dev_priv, pipe) - drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", - pipe_name(pipe), - intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); - } - - if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) - drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); - - if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) - drm_dbg(&dev_priv->drm, - "PCH transcoder CRC error interrupt\n"); - - if (pch_iir & SDE_TRANSA_FIFO_UNDER) - intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); - - if (pch_iir & SDE_TRANSB_FIFO_UNDER) - intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); -} - -static void ivb_err_int_handler(struct drm_i915_private *dev_priv) -{ - u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); - enum pipe pipe; - - if (err_int & ERR_INT_POISON) - drm_err(&dev_priv->drm, "Poison interrupt\n"); - - for_each_pipe(dev_priv, pipe) { - if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - - if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { - if (IS_IVYBRIDGE(dev_priv)) - ivb_pipe_crc_irq_handler(dev_priv, pipe); - else - hsw_pipe_crc_irq_handler(dev_priv, pipe); - } - } - - intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); -} - -static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) -{ - u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); - enum pipe pipe; - - if (serr_int & SERR_INT_POISON) - drm_err(&dev_priv->drm, "PCH poison interrupt\n"); - - for_each_pipe(dev_priv, pipe) - if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) - intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); - - intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); -} - -static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) -{ - enum pipe pipe; - u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - - ibx_hpd_irq_handler(dev_priv, hotplug_trigger); - - if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { - int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> - SDE_AUDIO_POWER_SHIFT_CPT); - drm_dbg(&dev_priv->drm, "PCH audio power 
change on port %c\n", - port_name(port)); - } - - if (pch_iir & SDE_AUX_MASK_CPT) - dp_aux_irq_handler(dev_priv); - - if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev_priv); - - if (pch_iir & SDE_AUDIO_CP_REQ_CPT) - drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); - - if (pch_iir & SDE_AUDIO_CP_CHG_CPT) - drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); - - if (pch_iir & SDE_FDI_MASK_CPT) { - for_each_pipe(dev_priv, pipe) - drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", - pipe_name(pipe), - intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); - } - - if (pch_iir & SDE_ERROR_CPT) - cpt_serr_int_handler(dev_priv); -} - -static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) -{ - u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; - u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; - u32 pin_mask = 0, long_mask = 0; - - if (ddi_hotplug_trigger) { - u32 dig_hotplug_reg; - - /* Locking due to DSI native GPIO sequences */ - spin_lock(&dev_priv->irq_lock); - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0); - spin_unlock(&dev_priv->irq_lock); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - ddi_hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, - icp_ddi_port_hotplug_long_detect); - } - - if (tc_hotplug_trigger) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - tc_hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, - icp_tc_port_hotplug_long_detect); - } - - if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); - - if (pch_iir & SDE_GMBUS_ICP) - gmbus_irq_handler(dev_priv); -} - -static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) -{ - u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & - ~SDE_PORTE_HOTPLUG_SPT; - u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; - u32 pin_mask = 0, long_mask = 0; - - if (hotplug_trigger) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, - spt_port_hotplug_long_detect); - } - - if (hotplug2_trigger) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - hotplug2_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, - spt_port_hotplug2_long_detect); - } - - if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); - - if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev_priv); -} - -static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, - u32 hotplug_trigger) -{ - u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, - ilk_port_hotplug_long_detect); - - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); -} - -static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, - u32 de_iir) -{ - enum pipe pipe; - u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; - - if (hotplug_trigger) - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); - - if (de_iir & DE_AUX_CHANNEL_A) - dp_aux_irq_handler(dev_priv); - - if (de_iir & 
DE_GSE) - intel_opregion_asle_intr(dev_priv); - - if (de_iir & DE_POISON) - drm_err(&dev_priv->drm, "Poison interrupt\n"); - - for_each_pipe(dev_priv, pipe) { - if (de_iir & DE_PIPE_VBLANK(pipe)) - intel_handle_vblank(dev_priv, pipe); - - if (de_iir & DE_PLANE_FLIP_DONE(pipe)) - flip_done_handler(dev_priv, pipe); - - if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - - if (de_iir & DE_PIPE_CRC_DONE(pipe)) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - } - - /* check event from PCH */ - if (de_iir & DE_PCH_EVENT) { - u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); - - if (HAS_PCH_CPT(dev_priv)) - cpt_irq_handler(dev_priv, pch_iir); - else - ibx_irq_handler(dev_priv, pch_iir); - - /* should clear PCH hotplug event before clear CPU irq */ - intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); - } - - if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) - gen5_rps_irq_handler(&to_gt(dev_priv)->rps); -} - -static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, - u32 de_iir) -{ - enum pipe pipe; - u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; - - if (hotplug_trigger) - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); - - if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev_priv); - - if (de_iir & DE_AUX_CHANNEL_A_IVB) - dp_aux_irq_handler(dev_priv); - - if (de_iir & DE_GSE_IVB) - intel_opregion_asle_intr(dev_priv); - - for_each_pipe(dev_priv, pipe) { - if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) - intel_handle_vblank(dev_priv, pipe); - - if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) - flip_done_handler(dev_priv, pipe); - } - - /* check event from PCH */ - if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { - u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); - - cpt_irq_handler(dev_priv, pch_iir); - - /* clear PCH hotplug event before clear CPU irq */ - intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); - } -} - /* * To handle irqs with the minimum potential races with fresh interrupts, we: * 1 - Disable Master Interrupt Control. 
@@ -1812,376 +490,6 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) return ret; } -static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, - u32 hotplug_trigger) -{ - u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, - bxt_port_hotplug_long_detect); - - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); -} - -static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) -{ - u32 pin_mask = 0, long_mask = 0; - u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; - u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; - - if (trigger_tc) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - trigger_tc, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, - gen11_port_hotplug_long_detect); - } - - if (trigger_tbt) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - trigger_tbt, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, - gen11_port_hotplug_long_detect); - } - - if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); - else - drm_err(&dev_priv->drm, - "Unexpected DE HPD interrupt 0x%08x\n", iir); -} - -static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) -{ - u32 mask; - - if (DISPLAY_VER(dev_priv) >= 13) - return TGL_DE_PORT_AUX_DDIA | - TGL_DE_PORT_AUX_DDIB | - TGL_DE_PORT_AUX_DDIC | - XELPD_DE_PORT_AUX_DDID | - XELPD_DE_PORT_AUX_DDIE | - TGL_DE_PORT_AUX_USBC1 | - TGL_DE_PORT_AUX_USBC2 | - TGL_DE_PORT_AUX_USBC3 | - TGL_DE_PORT_AUX_USBC4; - else if (DISPLAY_VER(dev_priv) >= 12) - return TGL_DE_PORT_AUX_DDIA | - TGL_DE_PORT_AUX_DDIB | - TGL_DE_PORT_AUX_DDIC | - TGL_DE_PORT_AUX_USBC1 | - TGL_DE_PORT_AUX_USBC2 | - TGL_DE_PORT_AUX_USBC3 | - TGL_DE_PORT_AUX_USBC4 | - TGL_DE_PORT_AUX_USBC5 | - TGL_DE_PORT_AUX_USBC6; - - - mask = GEN8_AUX_CHANNEL_A; - if (DISPLAY_VER(dev_priv) >= 9) - mask |= GEN9_AUX_CHANNEL_B | - GEN9_AUX_CHANNEL_C | - GEN9_AUX_CHANNEL_D; - - if (DISPLAY_VER(dev_priv) == 11) { - mask |= ICL_AUX_CHANNEL_F; - mask |= ICL_AUX_CHANNEL_E; - } - - return mask; -} - -static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) - return RKL_DE_PIPE_IRQ_FAULT_ERRORS; - else if (DISPLAY_VER(dev_priv) >= 11) - return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; - else if (DISPLAY_VER(dev_priv) >= 9) - return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; - else - return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; -} - -static void -gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) -{ - bool found = false; - - if (iir & GEN8_DE_MISC_GSE) { - intel_opregion_asle_intr(dev_priv); - found = true; - } - - if (iir & GEN8_DE_EDP_PSR) { - struct intel_encoder *encoder; - u32 psr_iir; - i915_reg_t iir_reg; - - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - if (DISPLAY_VER(dev_priv) >= 12) - iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder); - else - iir_reg = EDP_PSR_IIR; - - psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0); - - if (psr_iir) - found = true; - - intel_psr_irq_handler(intel_dp, psr_iir); - - /* prior GEN12 only have one EDP PSR */ - if 
(DISPLAY_VER(dev_priv) < 12) - break; - } - } - - if (!found) - drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); -} - -static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, - u32 te_trigger) -{ - enum pipe pipe = INVALID_PIPE; - enum transcoder dsi_trans; - enum port port; - u32 val, tmp; - - /* - * Incase of dual link, TE comes from DSI_1 - * this is to check if dual link is enabled - */ - val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); - val &= PORT_SYNC_MODE_ENABLE; - - /* - * if dual link is enabled, then read DSI_0 - * transcoder registers - */ - port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? - PORT_A : PORT_B; - dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1; - - /* Check if DSI configured in command mode */ - val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); - val = val & OP_MODE_MASK; - - if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { - drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n"); - return; - } - - /* Get PIPE for handling VBLANK event */ - val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); - switch (val & TRANS_DDI_EDP_INPUT_MASK) { - case TRANS_DDI_EDP_INPUT_A_ON: - pipe = PIPE_A; - break; - case TRANS_DDI_EDP_INPUT_B_ONOFF: - pipe = PIPE_B; - break; - case TRANS_DDI_EDP_INPUT_C_ONOFF: - pipe = PIPE_C; - break; - default: - drm_err(&dev_priv->drm, "Invalid PIPE\n"); - return; - } - - intel_handle_vblank(dev_priv, pipe); - - /* clear TE in dsi IIR */ - port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A; - tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); -} - -static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) -{ - if (DISPLAY_VER(i915) >= 9) - return GEN9_PIPE_PLANE1_FLIP_DONE; - else - return GEN8_PIPE_PRIMARY_FLIP_DONE; -} - -u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) -{ - u32 mask = GEN8_PIPE_FIFO_UNDERRUN; - - if (DISPLAY_VER(dev_priv) >= 13) - mask |= XELPD_PIPE_SOFT_UNDERRUN | - XELPD_PIPE_HARD_UNDERRUN; - - return mask; -} - -static irqreturn_t -gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) -{ - irqreturn_t ret = IRQ_NONE; - u32 iir; - enum pipe pipe; - - drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); - - if (master_ctl & GEN8_DE_MISC_IRQ) { - iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); - if (iir) { - intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); - ret = IRQ_HANDLED; - gen8_de_misc_irq_handler(dev_priv, iir); - } else { - drm_err_ratelimited(&dev_priv->drm, - "The master control interrupt lied (DE MISC)!\n"); - } - } - - if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { - iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); - if (iir) { - intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); - ret = IRQ_HANDLED; - gen11_hpd_irq_handler(dev_priv, iir); - } else { - drm_err_ratelimited(&dev_priv->drm, - "The master control interrupt lied, (DE HPD)!\n"); - } - } - - if (master_ctl & GEN8_DE_PORT_IRQ) { - iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); - if (iir) { - bool found = false; - - intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); - ret = IRQ_HANDLED; - - if (iir & gen8_de_port_aux_mask(dev_priv)) { - dp_aux_irq_handler(dev_priv); - found = true; - } - - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; - - if 
(hotplug_trigger) { - bxt_hpd_irq_handler(dev_priv, hotplug_trigger); - found = true; - } - } else if (IS_BROADWELL(dev_priv)) { - u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; - - if (hotplug_trigger) { - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); - found = true; - } - } - - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && - (iir & BXT_DE_PORT_GMBUS)) { - gmbus_irq_handler(dev_priv); - found = true; - } - - if (DISPLAY_VER(dev_priv) >= 11) { - u32 te_trigger = iir & (DSI0_TE | DSI1_TE); - - if (te_trigger) { - gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); - found = true; - } - } - - if (!found) - drm_err_ratelimited(&dev_priv->drm, - "Unexpected DE Port interrupt\n"); - } - else - drm_err_ratelimited(&dev_priv->drm, - "The master control interrupt lied (DE PORT)!\n"); - } - - for_each_pipe(dev_priv, pipe) { - u32 fault_errors; - - if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) - continue; - - iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); - if (!iir) { - drm_err_ratelimited(&dev_priv->drm, - "The master control interrupt lied (DE PIPE)!\n"); - continue; - } - - ret = IRQ_HANDLED; - intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); - - if (iir & GEN8_PIPE_VBLANK) - intel_handle_vblank(dev_priv, pipe); - - if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) - flip_done_handler(dev_priv, pipe); - - if (iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev_priv, pipe); - - if (iir & gen8_de_pipe_underrun_mask(dev_priv)) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - - fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); - if (fault_errors) - drm_err_ratelimited(&dev_priv->drm, - "Fault errors on pipe %c: 0x%08x\n", - pipe_name(pipe), - fault_errors); - } - - if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && - master_ctl & GEN8_DE_PCH_IRQ) { - /* - * FIXME(BDW): Assume for now that the new interrupt handling - * scheme also closed the SDE interrupt handling race we've seen - * on older pch-split platforms. But this needs testing. - */ - iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); - if (iir) { - intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); - ret = IRQ_HANDLED; - - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_handler(dev_priv, iir); - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - spt_irq_handler(dev_priv, iir); - else - cpt_irq_handler(dev_priv, iir); - } else { - /* - * Like on previous PCH there seems to be something - * fishy going on with forwarding PCH interrupts. 
- */ - drm_dbg(&dev_priv->drm, - "The master control interrupt lied (SDE)!\n"); - } - } - - return ret; -} - static inline u32 gen8_master_intr_disable(void __iomem * const regs) { raw_reg_write(regs, GEN8_MASTER_IRQ, 0); @@ -2232,29 +540,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static u32 -gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) -{ - void __iomem * const regs = i915->uncore.regs; - u32 iir; - - if (!(master_ctl & GEN11_GU_MISC_IRQ)) - return 0; - - iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); - if (likely(iir)) - raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); - - return iir; -} - -static void -gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) -{ - if (iir & GEN11_GU_MISC_GSE) - intel_opregion_asle_intr(i915); -} - static inline u32 gen11_master_intr_disable(void __iomem * const regs) { raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); @@ -2273,25 +558,6 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); } -static void -gen11_display_irq_handler(struct drm_i915_private *i915) -{ - void __iomem * const regs = i915->uncore.regs; - const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); - - disable_rpm_wakeref_asserts(&i915->runtime_pm); - /* - * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ - * for the display related bits. - */ - raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); - gen8_de_irq_handler(i915, disp_ctl); - raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, - GEN11_DISPLAY_IRQ_ENABLE); - - enable_rpm_wakeref_asserts(&i915->runtime_pm); -} - static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; @@ -2393,184 +659,6 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -/* Called from drm generic code, passed 'crtc' which - * we use as a pipe index - */ -int i8xx_enable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - return 0; -} - -int i915gm_enable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - - /* - * Vblank interrupts fail to wake the device up from C2+. - * Disabling render clock gating during C-states avoids - * the problem. There is a small power cost so we do this - * only when vblank interrupts are actually enabled. - */ - if (dev_priv->vblank_enabled++ == 0) - intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); - - return i8xx_enable_vblank(crtc); -} - -int i965_enable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - return 0; -} - -int ilk_enable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
- DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_enable_display_irq(dev_priv, bit); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - /* Even though there is no DMC, frame counter can get stuck when - * PSR is active as no frames are generated. - */ - if (HAS_PSR(dev_priv)) - drm_crtc_vblank_restore(crtc); - - return 0; -} - -static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, - bool enable) -{ - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - enum port port; - - if (!(intel_crtc->mode_flags & - (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) - return false; - - /* for dual link cases we consider TE from slave */ - if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) - port = PORT_B; - else - port = PORT_A; - - intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, - enable ? 0 : DSI_TE_EVENT); - - intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); - - return true; -} - -int bdw_enable_vblank(struct drm_crtc *_crtc) -{ - struct intel_crtc *crtc = to_intel_crtc(_crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - unsigned long irqflags; - - if (gen11_dsi_configure_te(crtc, true)) - return 0; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - /* Even if there is no DMC, frame counter can get stuck when - * PSR is active as no frames are generated, so check only for PSR. - */ - if (HAS_PSR(dev_priv)) - drm_crtc_vblank_restore(&crtc->base); - - return 0; -} - -/* Called from drm generic code, passed 'crtc' which - * we use as a pipe index - */ -void i8xx_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -void i915gm_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - - i8xx_disable_vblank(crtc); - - if (--dev_priv->vblank_enabled == 0) - intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); -} - -void i965_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -void ilk_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
- DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_disable_display_irq(dev_priv, bit); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -void bdw_disable_vblank(struct drm_crtc *_crtc) -{ - struct intel_crtc *crtc = to_intel_crtc(_crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - unsigned long irqflags; - - if (gen11_dsi_configure_te(crtc, false)) - return; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - static void ibx_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -2584,55 +672,6 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); } -static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - if (IS_CHERRYVIEW(dev_priv)) - intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); - else - intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); - - i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); - intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0); - - i9xx_pipestat_irq_reset(dev_priv); - - GEN3_IRQ_RESET(uncore, VLV_); - dev_priv->irq_mask = ~0u; -} - -static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - u32 pipestat_mask; - u32 enable_mask; - enum pipe pipe; - - pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; - - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - for_each_pipe(dev_priv, pipe) - i915_enable_pipestat(dev_priv, pipe, pipestat_mask); - - enable_mask = I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_LPE_PIPE_A_INTERRUPT | - I915_LPE_PIPE_B_INTERRUPT; - - if (IS_CHERRYVIEW(dev_priv)) - enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | - I915_LPE_PIPE_C_INTERRUPT; - - drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); - - dev_priv->irq_mask = ~enable_mask; - - GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); -} - /* drm_dma.h hooks */ static void ilk_irq_reset(struct drm_i915_private *dev_priv) @@ -2668,26 +707,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -static void gen8_display_irq_reset(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - enum pipe pipe; - - if (!HAS_DISPLAY(dev_priv)) - return; - - intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); - intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); - - for_each_pipe(dev_priv, pipe) - if (intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); - - GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); - GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); -} - static void gen8_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -2703,49 +722,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) } -static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - enum pipe pipe; - u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | - BIT(TRANSCODER_C) | BIT(TRANSCODER_D); - - if (!HAS_DISPLAY(dev_priv)) - return; 
- - intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); - - if (DISPLAY_VER(dev_priv) >= 12) { - enum transcoder trans; - - for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { - enum intel_display_power_domain domain; - - domain = POWER_DOMAIN_TRANSCODER(trans); - if (!intel_display_power_is_enabled(dev_priv, domain)) - continue; - - intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); - intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); - } - } else { - intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); - intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); - } - - for_each_pipe(dev_priv, pipe) - if (intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); - - GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); - GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); - GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); - - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - GEN3_IRQ_RESET(uncore, SDE); -} - static void gen11_irq_reset(struct drm_i915_private *dev_priv) { struct intel_gt *gt = to_gt(dev_priv); @@ -2777,52 +753,6 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) GEN3_IRQ_RESET(uncore, GEN8_PCU_); } -void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, - u8 pipe_mask) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 extra_ier = GEN8_PIPE_VBLANK | - gen8_de_pipe_underrun_mask(dev_priv) | - gen8_de_pipe_flip_done_mask(dev_priv); - enum pipe pipe; - - spin_lock_irq(&dev_priv->irq_lock); - - if (!intel_irqs_enabled(dev_priv)) { - spin_unlock_irq(&dev_priv->irq_lock); - return; - } - - for_each_pipe_masked(dev_priv, pipe, pipe_mask) - GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->de_irq_mask[pipe], - ~dev_priv->de_irq_mask[pipe] | extra_ier); - - spin_unlock_irq(&dev_priv->irq_lock); -} - -void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, - u8 pipe_mask) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - enum pipe pipe; - - spin_lock_irq(&dev_priv->irq_lock); - - if (!intel_irqs_enabled(dev_priv)) { - spin_unlock_irq(&dev_priv->irq_lock); - return; - } - - for_each_pipe_masked(dev_priv, pipe, pipe_mask) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); - - spin_unlock_irq(&dev_priv->irq_lock); - - /* make sure we're done processing display irqs */ - intel_synchronize_irq(dev_priv); -} - static void cherryview_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -2840,377 +770,6 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -static u32 ibx_hotplug_enables(struct intel_encoder *encoder) -{ - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - - switch (encoder->hpd_pin) { - case HPD_PORT_A: - /* - * When CPU and PCH are on the same package, port A - * HPD must be enabled in both north and south. - */ - return HAS_PCH_LPT_LP(i915) ? - PORTA_HOTPLUG_ENABLE : 0; - case HPD_PORT_B: - return PORTB_HOTPLUG_ENABLE | - PORTB_PULSE_DURATION_2ms; - case HPD_PORT_C: - return PORTC_HOTPLUG_ENABLE | - PORTC_PULSE_DURATION_2ms; - case HPD_PORT_D: - return PORTD_HOTPLUG_ENABLE | - PORTD_PULSE_DURATION_2ms; - default: - return 0; - } -} - -static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - /* - * Enable digital hotplug on the PCH, and configure the DP short pulse - * duration to 2ms (which is the minimum in the Display Port spec). - * The pulse duration bits are reserved on LPT+. 
- */ - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - PORTA_HOTPLUG_ENABLE | - PORTB_HOTPLUG_ENABLE | - PORTC_HOTPLUG_ENABLE | - PORTD_HOTPLUG_ENABLE | - PORTB_PULSE_DURATION_MASK | - PORTC_PULSE_DURATION_MASK | - PORTD_PULSE_DURATION_MASK, - intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); -} - -static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_irqs, enabled_irqs; - - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - - ibx_hpd_detection_setup(dev_priv); -} - -static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder) -{ - switch (encoder->hpd_pin) { - case HPD_PORT_A: - case HPD_PORT_B: - case HPD_PORT_C: - case HPD_PORT_D: - return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin); - default: - return 0; - } -} - -static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) -{ - switch (encoder->hpd_pin) { - case HPD_PORT_TC1: - case HPD_PORT_TC2: - case HPD_PORT_TC3: - case HPD_PORT_TC4: - case HPD_PORT_TC5: - case HPD_PORT_TC6: - return ICP_TC_HPD_ENABLE(encoder->hpd_pin); - default: - return 0; - } -} - -static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, - SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | - SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | - SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | - SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D), - intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); -} - -static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, - ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | - ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | - ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | - ICP_TC_HPD_ENABLE(HPD_PORT_TC4) | - ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | - ICP_TC_HPD_ENABLE(HPD_PORT_TC6), - intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); -} - -static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_irqs, enabled_irqs; - - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - - if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - - icp_ddi_hpd_detection_setup(dev_priv); - icp_tc_hpd_detection_setup(dev_priv); -} - -static u32 gen11_hotplug_enables(struct intel_encoder *encoder) -{ - switch (encoder->hpd_pin) { - case HPD_PORT_TC1: - case HPD_PORT_TC2: - case HPD_PORT_TC3: - case HPD_PORT_TC4: - case HPD_PORT_TC5: - case HPD_PORT_TC6: - return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin); - default: - return 0; - } -} - -static void dg1_hpd_invert(struct drm_i915_private *i915) -{ - u32 val = (INVERT_DDIA_HPD | - INVERT_DDIB_HPD | - INVERT_DDIC_HPD | - INVERT_DDID_HPD); - intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val); -} - -static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - dg1_hpd_invert(dev_priv); - icp_hpd_irq_setup(dev_priv); -} - -static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | - 
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6), - intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); -} - -static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | - GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6), - intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); -} - -static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_irqs, enabled_irqs; - - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); - - intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs, - ~enabled_irqs & hotplug_irqs); - intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); - - gen11_tc_hpd_detection_setup(dev_priv); - gen11_tbt_hpd_detection_setup(dev_priv); - - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_hpd_irq_setup(dev_priv); -} - -static u32 spt_hotplug_enables(struct intel_encoder *encoder) -{ - switch (encoder->hpd_pin) { - case HPD_PORT_A: - return PORTA_HOTPLUG_ENABLE; - case HPD_PORT_B: - return PORTB_HOTPLUG_ENABLE; - case HPD_PORT_C: - return PORTC_HOTPLUG_ENABLE; - case HPD_PORT_D: - return PORTD_HOTPLUG_ENABLE; - default: - return 0; - } -} - -static u32 spt_hotplug2_enables(struct intel_encoder *encoder) -{ - switch (encoder->hpd_pin) { - case HPD_PORT_E: - return PORTE_HOTPLUG_ENABLE; - default: - return 0; - } -} - -static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - /* Display WA #1179 WaHardHangonHotPlug: cnp */ - if (HAS_PCH_CNP(dev_priv)) { - intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, - CHASSIS_CLK_REQ_DURATION(0xf)); - } - - /* Enable digital hotplug on the PCH */ - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - PORTA_HOTPLUG_ENABLE | - PORTB_HOTPLUG_ENABLE | - PORTC_HOTPLUG_ENABLE | - PORTD_HOTPLUG_ENABLE, - intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); - - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE, - intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); -} - -static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_irqs, enabled_irqs; - - if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - - spt_hpd_detection_setup(dev_priv); -} - -static u32 ilk_hotplug_enables(struct intel_encoder *encoder) -{ - switch (encoder->hpd_pin) { - case HPD_PORT_A: - return DIGITAL_PORTA_HOTPLUG_ENABLE | - DIGITAL_PORTA_PULSE_DURATION_2ms; - default: - return 0; - } -} - -static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - /* - * Enable digital hotplug on the CPU, and configure the DP short pulse - * duration to 2ms (which is the minimum in the Display Port spec) - * The pulse duration bits are reserved on HSW+. 
- */ - intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, - DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK, - intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); -} - -static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_irqs, enabled_irqs; - - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); - - if (DISPLAY_VER(dev_priv) >= 8) - bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - else - ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); - - ilk_hpd_detection_setup(dev_priv); - - ibx_hpd_irq_setup(dev_priv); -} - -static u32 bxt_hotplug_enables(struct intel_encoder *encoder) -{ - u32 hotplug; - - switch (encoder->hpd_pin) { - case HPD_PORT_A: - hotplug = PORTA_HOTPLUG_ENABLE; - if (intel_bios_encoder_hpd_invert(encoder->devdata)) - hotplug |= BXT_DDIA_HPD_INVERT; - return hotplug; - case HPD_PORT_B: - hotplug = PORTB_HOTPLUG_ENABLE; - if (intel_bios_encoder_hpd_invert(encoder->devdata)) - hotplug |= BXT_DDIB_HPD_INVERT; - return hotplug; - case HPD_PORT_C: - hotplug = PORTC_HOTPLUG_ENABLE; - if (intel_bios_encoder_hpd_invert(encoder->devdata)) - hotplug |= BXT_DDIC_HPD_INVERT; - return hotplug; - default: - return 0; - } -} - -static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) -{ - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - PORTA_HOTPLUG_ENABLE | - PORTB_HOTPLUG_ENABLE | - PORTC_HOTPLUG_ENABLE | - BXT_DDI_HPD_INVERT_MASK, - intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); -} - -static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_irqs, enabled_irqs; - - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); - - bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - - bxt_hpd_detection_setup(dev_priv); -} - -/* - * SDEIER is also touched by the interrupt handler to work around missed PCH - * interrupts. Hence we can't update it after the interrupt handler is enabled - - * instead we unconditionally enable all PCH interrupt sources here, but then - * only unmask them as needed with SDEIMR. - * - * Note that we currently do this after installing the interrupt handler, - * but before we enable the master interrupt. That should be sufficient - * to avoid races with the irq handler, assuming we have MSI. Shared legacy - * interrupts could still race. 
- */ -static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 mask; - - if (HAS_PCH_NOP(dev_priv)) - return; - - if (HAS_PCH_IBX(dev_priv)) - mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; - else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) - mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; - else - mask = SDE_GMBUS_CPT; - - GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); -} - static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3254,35 +813,6 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) display_mask | extra_mask); } -void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->irq_lock); - - if (dev_priv->display_irqs_enabled) - return; - - dev_priv->display_irqs_enabled = true; - - if (intel_irqs_enabled(dev_priv)) { - vlv_display_irq_reset(dev_priv); - vlv_display_irq_postinstall(dev_priv); - } -} - -void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->irq_lock); - - if (!dev_priv->display_irqs_enabled) - return; - - dev_priv->display_irqs_enabled = false; - - if (intel_irqs_enabled(dev_priv)) - vlv_display_irq_reset(dev_priv); -} - - static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) { gen5_gt_irq_postinstall(to_gt(dev_priv)); @@ -3296,94 +826,6 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); } -static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | - GEN8_PIPE_CDCLK_CRC_DONE; - u32 de_pipe_enables; - u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); - u32 de_port_enables; - u32 de_misc_masked = GEN8_DE_EDP_PSR; - u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | - BIT(TRANSCODER_C) | BIT(TRANSCODER_D); - enum pipe pipe; - - if (!HAS_DISPLAY(dev_priv)) - return; - - if (DISPLAY_VER(dev_priv) <= 10) - de_misc_masked |= GEN8_DE_MISC_GSE; - - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - de_port_masked |= BXT_DE_PORT_GMBUS; - - if (DISPLAY_VER(dev_priv) >= 11) { - enum port port; - - if (intel_bios_is_dsi_present(dev_priv, &port)) - de_port_masked |= DSI0_TE | DSI1_TE; - } - - de_pipe_enables = de_pipe_masked | - GEN8_PIPE_VBLANK | - gen8_de_pipe_underrun_mask(dev_priv) | - gen8_de_pipe_flip_done_mask(dev_priv); - - de_port_enables = de_port_masked; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; - else if (IS_BROADWELL(dev_priv)) - de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; - - if (DISPLAY_VER(dev_priv) >= 12) { - enum transcoder trans; - - for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { - enum intel_display_power_domain domain; - - domain = POWER_DOMAIN_TRANSCODER(trans); - if (!intel_display_power_is_enabled(dev_priv, domain)) - continue; - - gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); - } - } else { - gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); - } - - for_each_pipe(dev_priv, pipe) { - dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; - - if (intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->de_irq_mask[pipe], - de_pipe_enables); - } - - GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); - 
GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); - - if (DISPLAY_VER(dev_priv) >= 11) { - u32 de_hpd_masked = 0; - u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | - GEN11_DE_TBT_HOTPLUG_MASK; - - GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, - de_hpd_enables); - } -} - -static void icp_irq_postinstall(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 mask = SDE_GMBUS_ICP; - - GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); -} - static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) { if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) @@ -3397,17 +839,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) gen8_master_intr_enable(dev_priv->uncore.regs); } -static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) -{ - if (!HAS_DISPLAY(dev_priv)) - return; - - gen8_de_irq_postinstall(dev_priv); - - intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, - GEN11_DISPLAY_IRQ_ENABLE); -} - static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_gt *gt = to_gt(dev_priv); @@ -3439,7 +870,11 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); if (HAS_DISPLAY(dev_priv)) { - icp_irq_postinstall(dev_priv); + if (DISPLAY_VER(dev_priv) >= 14) + mtp_irq_postinstall(dev_priv); + else + icp_irq_postinstall(dev_priv); + gen8_de_irq_postinstall(dev_priv); intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); @@ -3831,31 +1266,6 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) i915_enable_asle_pipestat(dev_priv); } -static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_en; - - lockdep_assert_held(&dev_priv->irq_lock); - - /* Note HDMI and DP share hotplug bits */ - /* enable bits are the same for all generations */ - hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); - /* Programming the CRT detection parameters tends - to generate a spurious hotplug event about three - seconds later. So just do it once. 
- */ - if (IS_G4X(dev_priv)) - hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; - hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; - - /* Ignore TV since it's buggy */ - i915_hotplug_interrupt_update_locked(dev_priv, - HOTPLUG_INT_EN_MASK | - CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | - CRT_HOTPLUG_ACTIVATION_PERIOD_64, - hotplug_en); -} - static irqreturn_t i965_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; @@ -3915,30 +1325,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) return ret; } -struct intel_hotplug_funcs { - void (*hpd_irq_setup)(struct drm_i915_private *i915); -}; - -#define HPD_FUNCS(platform) \ -static const struct intel_hotplug_funcs platform##_hpd_funcs = { \ - .hpd_irq_setup = platform##_hpd_irq_setup, \ -} - -HPD_FUNCS(i915); -HPD_FUNCS(dg1); -HPD_FUNCS(gen11); -HPD_FUNCS(bxt); -HPD_FUNCS(icp); -HPD_FUNCS(spt); -HPD_FUNCS(ilk); -#undef HPD_FUNCS - -void intel_hpd_irq_setup(struct drm_i915_private *i915) -{ - if (i915->display_irqs_enabled && i915->display.funcs.hotplug) - i915->display.funcs.hotplug->hpd_irq_setup(i915); -} - /** * intel_irq_init - initializes irq support * @dev_priv: i915 device instance @@ -3961,10 +1347,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - intel_hpd_init_pins(dev_priv); - - intel_hpd_init_early(dev_priv); - dev_priv->drm.vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on @@ -3977,25 +1359,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->display_irqs_enabled = false; - if (HAS_GMCH(dev_priv)) { - if (I915_HAS_HOTPLUG(dev_priv)) - dev_priv->display.funcs.hotplug = &i915_hpd_funcs; - } else { - if (HAS_PCH_DG2(dev_priv)) - dev_priv->display.funcs.hotplug = &icp_hpd_funcs; - else if (HAS_PCH_DG1(dev_priv)) - dev_priv->display.funcs.hotplug = &dg1_hpd_funcs; - else if (DISPLAY_VER(dev_priv) >= 11) - dev_priv->display.funcs.hotplug = &gen11_hpd_funcs; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->display.funcs.hotplug = &bxt_hpd_funcs; - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - dev_priv->display.funcs.hotplug = &icp_hpd_funcs; - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - dev_priv->display.funcs.hotplug = &spt_hpd_funcs; - else - dev_priv->display.funcs.hotplug = &ilk_hpd_funcs; - } + intel_hotplug_irq_init(dev_priv); } /** @@ -4140,7 +1504,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) /* * FIXME we can get called twice during driver probe * error handling as well as during driver remove due to - * intel_modeset_driver_remove() calling us out of sequence. + * intel_display_driver_remove() calling us out of sequence. * Would be nice if it didn't do that... 
*/ if (!dev_priv->irq_enabled) diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 03ee4c8b1ed3..e665a1b007dc 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -9,7 +9,7 @@ #include <linux/ktime.h> #include <linux/types.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" enum pipe; struct drm_crtc; @@ -17,6 +17,7 @@ struct drm_device; struct drm_display_mode; struct drm_i915_private; struct intel_crtc; +struct intel_encoder; struct intel_uncore; void intel_irq_init(struct drm_i915_private *dev_priv); @@ -24,33 +25,6 @@ void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); -u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, - enum pipe pipe); -void -i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, - u32 status_mask); - -void -i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, - u32 status_mask); - -void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); -void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); - -void intel_hpd_irq_setup(struct drm_i915_private *i915); -void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, - u32 mask, - u32 bits); - -void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits); -void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits); - -void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); -void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); - -void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits); -void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits); - void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv); @@ -66,23 +40,7 @@ bool intel_irqs_enabled(struct drm_i915_private *dev_priv); void intel_synchronize_irq(struct drm_i915_private *i915); void intel_synchronize_hardirq(struct drm_i915_private *i915); -void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, - u8 pipe_mask); -void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, - u8 pipe_mask); -u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv); - - -int i8xx_enable_vblank(struct drm_crtc *crtc); -int i915gm_enable_vblank(struct drm_crtc *crtc); -int i965_enable_vblank(struct drm_crtc *crtc); -int ilk_enable_vblank(struct drm_crtc *crtc); -int bdw_enable_vblank(struct drm_crtc *crtc); -void i8xx_disable_vblank(struct drm_crtc *crtc); -void i915gm_disable_vblank(struct drm_crtc *crtc); -void i965_disable_vblank(struct drm_crtc *crtc); -void ilk_disable_vblank(struct drm_crtc *crtc); -void bdw_disable_vblank(struct drm_crtc *crtc); +void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg); void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, i915_reg_t iir, i915_reg_t ier); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 75cbccd1a441..3d7a5db9833b 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -27,6 +27,7 @@ #include <drm/i915_pciids.h> #include "display/intel_display.h" +#include "display/intel_display_driver.h" #include "gt/intel_gt_regs.h" #include "gt/intel_sa_media.h" #include 
"gem/i915_gem_object_types.h" @@ -37,132 +38,13 @@ #include "i915_reg.h" #include "intel_pci_config.h" +__diag_push(); +__diag_ignore_all("-Woverride-init", "Allow overriding inherited members"); + #define PLATFORM(x) .platform = (x) #define GEN(x) \ .__runtime.graphics.ip.ver = (x), \ - .__runtime.media.ip.ver = (x), \ - .__runtime.display.ip.ver = (x) - -#define NO_DISPLAY .__runtime.pipe_mask = 0 - -#define I845_PIPE_OFFSETS \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - } - -#define I9XX_PIPE_OFFSETS \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - [TRANSCODER_B] = PIPE_B_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ - } - -#define IVB_PIPE_OFFSETS \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - [TRANSCODER_B] = PIPE_B_OFFSET, \ - [TRANSCODER_C] = PIPE_C_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ - [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ - } - -#define HSW_PIPE_OFFSETS \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - [TRANSCODER_B] = PIPE_B_OFFSET, \ - [TRANSCODER_C] = PIPE_C_OFFSET, \ - [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ - [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ - [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ - } - -#define CHV_PIPE_OFFSETS \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - [TRANSCODER_B] = PIPE_B_OFFSET, \ - [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ - [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \ - } - -#define I845_CURSOR_OFFSETS \ - .display.cursor_offsets = { \ - [PIPE_A] = CURSOR_A_OFFSET, \ - } - -#define I9XX_CURSOR_OFFSETS \ - .display.cursor_offsets = { \ - [PIPE_A] = CURSOR_A_OFFSET, \ - [PIPE_B] = CURSOR_B_OFFSET, \ - } - -#define CHV_CURSOR_OFFSETS \ - .display.cursor_offsets = { \ - [PIPE_A] = CURSOR_A_OFFSET, \ - [PIPE_B] = CURSOR_B_OFFSET, \ - [PIPE_C] = CHV_CURSOR_C_OFFSET, \ - } - -#define IVB_CURSOR_OFFSETS \ - .display.cursor_offsets = { \ - [PIPE_A] = CURSOR_A_OFFSET, \ - [PIPE_B] = IVB_CURSOR_B_OFFSET, \ - [PIPE_C] = IVB_CURSOR_C_OFFSET, \ - } - -#define TGL_CURSOR_OFFSETS \ - .display.cursor_offsets = { \ - [PIPE_A] = CURSOR_A_OFFSET, \ - [PIPE_B] = IVB_CURSOR_B_OFFSET, \ - [PIPE_C] = IVB_CURSOR_C_OFFSET, \ - [PIPE_D] = TGL_CURSOR_D_OFFSET, \ - } - -#define I845_COLORS \ - .display.color = { .gamma_lut_size = 256 } -#define I9XX_COLORS \ - .display.color = { .gamma_lut_size = 129, \ - .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ - } -#define ILK_COLORS \ - .display.color = { .gamma_lut_size = 1024 } -#define IVB_COLORS \ - .display.color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 } -#define CHV_COLORS \ - .display.color = { \ - .degamma_lut_size = 65, .gamma_lut_size = 257, \ - .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ - .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ - } -#define GLK_COLORS \ - .display.color = { \ - .degamma_lut_size = 33, .gamma_lut_size = 1024, \ - .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ - DRM_COLOR_LUT_EQUAL_CHANNELS, \ - } -#define ICL_COLORS \ - .display.color = { \ - .degamma_lut_size = 33, 
.gamma_lut_size = 262145, \ - .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ - DRM_COLOR_LUT_EQUAL_CHANNELS, \ - .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ - } + .__runtime.media.ip.ver = (x) #define LEGACY_CACHELEVEL \ .cachelevel_to_pat = { \ @@ -207,12 +89,6 @@ #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ - .display.has_overlay = 1, \ - .display.cursor_needs_physical = 1, \ - .display.overlay_needs_physical = 1, \ - .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .has_3d_pipeline = 1, \ .hws_needs_physical = 1, \ @@ -222,20 +98,12 @@ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ .max_pat_index = 3, \ - I9XX_PIPE_OFFSETS, \ - I9XX_CURSOR_OFFSETS, \ - I9XX_COLORS, \ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL #define I845_FEATURES \ GEN(2), \ - .__runtime.pipe_mask = BIT(PIPE_A), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A), \ - .display.has_overlay = 1, \ - .display.overlay_needs_physical = 1, \ - .display.has_gmch = 1, \ .has_3d_pipeline = 1, \ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ @@ -245,9 +113,6 @@ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ .max_pat_index = 3, \ - I845_PIPE_OFFSETS, \ - I845_CURSOR_OFFSETS, \ - I845_COLORS, \ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL @@ -265,20 +130,15 @@ static const struct intel_device_info i845g_info = { static const struct intel_device_info i85x_info = { I830_FEATURES, PLATFORM(INTEL_I85X), - .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), - .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN3_FEATURES \ GEN(3), \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ - .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .__runtime.platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ @@ -286,9 +146,6 @@ static const struct intel_device_info i865g_info = { .has_coherent_ggtt = true, \ .dma_mask_size = 32, \ .max_pat_index = 3, \ - I9XX_PIPE_OFFSETS, \ - I9XX_CURSOR_OFFSETS, \ - I9XX_COLORS, \ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL @@ -297,9 +154,6 @@ static const struct intel_device_info i915g_info = { GEN3_FEATURES, PLATFORM(INTEL_I915G), .has_coherent_ggtt = false, - .display.cursor_needs_physical = 1, - .display.has_overlay = 1, - .display.overlay_needs_physical = 1, .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -308,11 +162,6 @@ static const struct intel_device_info i915gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I915GM), .is_mobile = 1, - .display.cursor_needs_physical = 1, - .display.has_overlay = 1, - .display.overlay_needs_physical = 1, - .display.supports_tv = 1, - .__runtime.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -320,10 +169,6 @@ static const struct intel_device_info i915gm_info = { static const struct intel_device_info i945g_info = { GEN3_FEATURES, PLATFORM(INTEL_I945G), - .display.has_hotplug = 1, - .display.cursor_needs_physical = 1, - .display.has_overlay = 1, - .display.overlay_needs_physical = 1, .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -332,12 +177,6 @@ static const struct intel_device_info i945gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I945GM), .is_mobile = 1, - 
.display.has_hotplug = 1, - .display.cursor_needs_physical = 1, - .display.has_overlay = 1, - .display.overlay_needs_physical = 1, - .display.supports_tv = 1, - .__runtime.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -345,16 +184,12 @@ static const struct intel_device_info i945gm_info = { static const struct intel_device_info g33_info = { GEN3_FEATURES, PLATFORM(INTEL_G33), - .display.has_hotplug = 1, - .display.has_overlay = 1, .dma_mask_size = 36, }; static const struct intel_device_info pnv_g_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), - .display.has_hotplug = 1, - .display.has_overlay = 1, .dma_mask_size = 36, }; @@ -362,17 +197,11 @@ static const struct intel_device_info pnv_m_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), .is_mobile = 1, - .display.has_hotplug = 1, - .display.has_overlay = 1, .dma_mask_size = 36, }; #define GEN4_FEATURES \ GEN(4), \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ - .display.has_hotplug = 1, \ - .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .__runtime.platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ @@ -380,9 +209,6 @@ static const struct intel_device_info pnv_m_info = { .has_coherent_ggtt = true, \ .dma_mask_size = 36, \ .max_pat_index = 3, \ - I9XX_PIPE_OFFSETS, \ - I9XX_CURSOR_OFFSETS, \ - I9XX_COLORS, \ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL @@ -390,7 +216,6 @@ static const struct intel_device_info pnv_m_info = { static const struct intel_device_info i965g_info = { GEN4_FEATURES, PLATFORM(INTEL_I965G), - .display.has_overlay = 1, .hws_needs_physical = 1, .has_snoop = false, }; @@ -399,9 +224,6 @@ static const struct intel_device_info i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), .is_mobile = 1, - .__runtime.fbc_mask = BIT(INTEL_FBC_A), - .display.has_overlay = 1, - .display.supports_tv = 1, .hws_needs_physical = 1, .has_snoop = false, }; @@ -417,17 +239,12 @@ static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, - .__runtime.fbc_mask = BIT(INTEL_FBC_A), - .display.supports_tv = 1, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; #define GEN5_FEATURES \ GEN(5), \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ - .display.has_hotplug = 1, \ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ @@ -436,9 +253,6 @@ static const struct intel_device_info gm45_info = { .has_rc6 = 0, \ .dma_mask_size = 36, \ .max_pat_index = 3, \ - I9XX_PIPE_OFFSETS, \ - I9XX_CURSOR_OFFSETS, \ - ILK_COLORS, \ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL @@ -453,15 +267,10 @@ static const struct intel_device_info ilk_m_info = { PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, .has_rps = true, - .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN6_FEATURES \ GEN(6), \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ - .display.has_hotplug = 1, \ - .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ @@ -474,9 +283,6 @@ static const struct intel_device_info ilk_m_info = { .max_pat_index = 3, \ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \ 
.__runtime.ppgtt_size = 31, \ - I9XX_PIPE_OFFSETS, \ - I9XX_CURSOR_OFFSETS, \ - ILK_COLORS, \ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL @@ -513,10 +319,6 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ - .display.has_hotplug = 1, \ - .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ @@ -529,9 +331,6 @@ static const struct intel_device_info snb_m_gt2_info = { .max_pat_index = 3, \ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \ .__runtime.ppgtt_size = 31, \ - IVB_PIPE_OFFSETS, \ - IVB_CURSOR_OFFSETS, \ - IVB_COLORS, \ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL @@ -570,7 +369,6 @@ static const struct intel_device_info ivb_m_gt2_info = { static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), - NO_DISPLAY, .gt = 2, .has_l3_dpf = 1, }; @@ -579,14 +377,10 @@ static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, .has_rps = true, - .display.has_gmch = 1, - .display.has_hotplug = 1, .dma_mask_size = 40, .max_pat_index = 3, .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, @@ -594,10 +388,6 @@ static const struct intel_device_info vlv_info = { .has_snoop = true, .has_coherent_ggtt = false, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), - .display.mmio_offset = VLV_DISPLAY_BASE, - I9XX_PIPE_OFFSETS, - I9XX_CURSOR_OFFSETS, - I9XX_COLORS, GEN_DEFAULT_PAGE_SIZES, GEN_DEFAULT_REGIONS, LEGACY_CACHELEVEL, @@ -606,13 +396,7 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ - .display.has_ddi = 1, \ - .display.has_fpga_dbg = 1, \ - .display.has_dp_mst = 1, \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ - HSW_PIPE_OFFSETS, \ .has_runtime_pm = 1 #define HSW_PLATFORM \ @@ -676,9 +460,6 @@ static const struct intel_device_info bdw_gt3_info = { static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), - .display.has_hotplug = 1, .is_lp = 1, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, @@ -686,7 +467,6 @@ static const struct intel_device_info chv_info = { .has_rc6 = 1, .has_rps = true, .has_logical_ring_contexts = 1, - .display.has_gmch = 1, .dma_mask_size = 39, .max_pat_index = 3, .__runtime.ppgtt_type = INTEL_PPGTT_FULL, @@ -694,10 +474,6 @@ static const struct intel_device_info chv_info = { .has_reset_engine = 1, .has_snoop = true, .has_coherent_ggtt = false, - .display.mmio_offset = VLV_DISPLAY_BASE, - CHV_PIPE_OFFSETS, - CHV_CURSOR_OFFSETS, - CHV_COLORS, GEN_DEFAULT_PAGE_SIZES, GEN_DEFAULT_REGIONS, LEGACY_CACHELEVEL, @@ -711,14 +487,7 @@ static const struct 
intel_device_info chv_info = { GEN8_FEATURES, \ GEN(9), \ GEN9_DEFAULT_PAGE_SIZES, \ - .__runtime.has_dmc = 1, \ - .has_gt_uc = 1, \ - .__runtime.has_hdcp = 1, \ - .display.has_ipc = 1, \ - .display.has_psr = 1, \ - .display.has_psr_hw_tracking = 1, \ - .display.dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \ - .display.dbuf.slice_mask = BIT(DBUF_S1) + .has_gt_uc = 1 #define SKL_PLATFORM \ GEN9_FEATURES, \ @@ -753,26 +522,12 @@ static const struct intel_device_info skl_gt4_info = { #define GEN9_LP_FEATURES \ GEN(9), \ .is_lp = 1, \ - .display.dbuf.slice_mask = BIT(DBUF_S1), \ - .display.has_hotplug = 1, \ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ - BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_3d_pipeline = 1, \ .has_64bit_reloc = 1, \ - .display.has_ddi = 1, \ - .display.has_fpga_dbg = 1, \ - .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ - .__runtime.has_hdcp = 1, \ - .display.has_psr = 1, \ - .display.has_psr_hw_tracking = 1, \ .has_runtime_pm = 1, \ - .__runtime.has_dmc = 1, \ .has_rc6 = 1, \ .has_rps = true, \ - .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ .has_gt_uc = 1, \ .dma_mask_size = 39, \ @@ -781,11 +536,7 @@ static const struct intel_device_info skl_gt4_info = { .has_reset_engine = 1, \ .has_snoop = true, \ .has_coherent_ggtt = false, \ - .display.has_ipc = 1, \ .max_pat_index = 3, \ - HSW_PIPE_OFFSETS, \ - IVB_CURSOR_OFFSETS, \ - IVB_COLORS, \ GEN9_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS, \ LEGACY_CACHELEVEL @@ -793,15 +544,11 @@ static const struct intel_device_info skl_gt4_info = { static const struct intel_device_info bxt_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_BROXTON), - .display.dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */ }; static const struct intel_device_info glk_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_GEMINILAKE), - .__runtime.display.ip.ver = 10, - .display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ - GLK_COLORS, }; #define KBL_PLATFORM \ @@ -868,31 +615,7 @@ static const struct intel_device_info cml_gt2_info = { #define GEN11_FEATURES \ GEN9_FEATURES, \ GEN11_DEFAULT_PAGE_SIZES, \ - .display.abox_mask = BIT(0), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ - BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - [TRANSCODER_B] = PIPE_B_OFFSET, \ - [TRANSCODER_C] = PIPE_C_OFFSET, \ - [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ - [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ - [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ - [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ - [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ - [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ - [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ - }, \ GEN(11), \ - ICL_COLORS, \ - .display.dbuf.size = 2048, \ - .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ - .__runtime.has_dsc = 1, \ .has_coherent_ggtt = false, \ .has_logical_ring_elsq = 1 @@ -920,32 +643,9 @@ static const struct intel_device_info jsl_info = { #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ - .display.abox_mask = GENMASK(2, 1), \ - .__runtime.pipe_mask = BIT(PIPE_A) | 
BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ - BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - [TRANSCODER_B] = PIPE_B_OFFSET, \ - [TRANSCODER_C] = PIPE_C_OFFSET, \ - [TRANSCODER_D] = PIPE_D_OFFSET, \ - [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ - [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ - [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ - [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ - [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ - [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ - }, \ - TGL_CURSOR_OFFSETS, \ TGL_CACHELEVEL, \ .has_global_mocs = 1, \ .has_pxp = 1, \ - .display.has_dsb = 1, \ .max_pat_index = 3 static const struct intel_device_info tgl_info = { @@ -958,12 +658,6 @@ static const struct intel_device_info tgl_info = { static const struct intel_device_info rkl_info = { GEN12_FEATURES, PLATFORM(INTEL_ROCKETLAKE), - .display.abox_mask = BIT(0), - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | - BIT(TRANSCODER_C), - .display.has_hti = 1, - .display.has_psr_hw_tracking = 0, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), }; @@ -981,7 +675,6 @@ static const struct intel_device_info dg1_info = { DGFX_FEATURES, .__runtime.graphics.ip.rel = 10, PLATFORM(INTEL_DG1), - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | @@ -993,64 +686,14 @@ static const struct intel_device_info dg1_info = { static const struct intel_device_info adl_s_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_S), - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .display.has_hti = 1, - .display.has_psr_hw_tracking = 0, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .dma_mask_size = 39, }; -#define XE_LPD_FEATURES \ - .display.abox_mask = GENMASK(1, 0), \ - .display.color = { \ - .degamma_lut_size = 129, .gamma_lut_size = 1024, \ - .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ - DRM_COLOR_LUT_EQUAL_CHANNELS, \ - }, \ - .display.dbuf.size = 4096, \ - .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ - BIT(DBUF_S4), \ - .display.has_ddi = 1, \ - .__runtime.has_dmc = 1, \ - .display.has_dp_mst = 1, \ - .display.has_dsb = 1, \ - .__runtime.has_dsc = 1, \ - .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ - .display.has_fpga_dbg = 1, \ - .__runtime.has_hdcp = 1, \ - .display.has_hotplug = 1, \ - .display.has_ipc = 1, \ - .display.has_psr = 1, \ - .__runtime.display.ip.ver = 13, \ - .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .display.pipe_offsets = { \ - [TRANSCODER_A] = PIPE_A_OFFSET, \ - [TRANSCODER_B] = PIPE_B_OFFSET, \ - [TRANSCODER_C] = PIPE_C_OFFSET, \ - [TRANSCODER_D] = PIPE_D_OFFSET, \ - [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ - [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ - }, \ - .display.trans_offsets = { \ - [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ - [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ - [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ - [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ - [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ - 
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ - }, \ - TGL_CURSOR_OFFSETS - static const struct intel_device_info adl_p_info = { GEN12_FEATURES, - XE_LPD_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | - BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | - BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), - .display.has_cdclk_crawl = 1, - .display.has_psr_hw_tracking = 0, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .__runtime.ppgtt_size = 48, @@ -1101,7 +744,6 @@ static const struct intel_device_info xehpsdv_info = { XE_HPM_FEATURES, DGFX_FEATURES, PLATFORM(INTEL_XEHPSDV), - NO_DISPLAY, .has_64k_pages = 1, .has_media_ratio_mode = 1, .__runtime.platform_engine_mask = @@ -1124,7 +766,6 @@ static const struct intel_device_info xehpsdv_info = { .has_guc_deprivilege = 1, \ .has_heci_pxp = 1, \ .has_media_ratio_mode = 1, \ - .display.has_cdclk_squash = 1, \ .__runtime.platform_engine_mask = \ BIT(RCS0) | BIT(BCS0) | \ BIT(VECS0) | BIT(VECS1) | \ @@ -1133,14 +774,10 @@ static const struct intel_device_info xehpsdv_info = { static const struct intel_device_info dg2_info = { DG2_FEATURES, - XE_LPD_FEATURES, - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | - BIT(TRANSCODER_C) | BIT(TRANSCODER_D), }; static const struct intel_device_info ats_m_info = { DG2_FEATURES, - NO_DISPLAY, .require_force_probe = 1, .tuning_thread_rr_after_dep = 1, }; @@ -1162,7 +799,6 @@ static const struct intel_device_info pvc_info = { .__runtime.graphics.ip.rel = 60, .__runtime.media.ip.rel = 60, PLATFORM(INTEL_PONTEVECCHIO), - NO_DISPLAY, .has_flat_ccs = 0, .max_pat_index = 7, .__runtime.platform_engine_mask = @@ -1173,13 +809,6 @@ static const struct intel_device_info pvc_info = { PVC_CACHELEVEL, }; -#define XE_LPDP_FEATURES \ - XE_LPD_FEATURES, \ - .__runtime.display.ip.ver = 14, \ - .display.has_cdclk_crawl = 1, \ - .display.has_cdclk_squash = 1, \ - .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) - static const struct intel_gt_definition xelpmp_extra_gt[] = { { .type = GT_MEDIA, @@ -1192,9 +821,6 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = { static const struct intel_device_info mtl_info = { XE_HP_FEATURES, - XE_LPDP_FEATURES, - .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | - BIT(TRANSCODER_C) | BIT(TRANSCODER_D), /* * Real graphics IP version will be obtained from hardware GMD_ID * register. Value provided here is just for sanity checking. @@ -1220,6 +846,8 @@ static const struct intel_device_info mtl_info = { #undef PLATFORM +__diag_pop(); + /* * Make sure any device matches here are from most specific to most * general. 
For example, since the Quanta match is based on the subsystem @@ -1425,7 +1053,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENXIO; /* Detect if we need to wait for other drivers early on */ - if (intel_modeset_probe_defer(pdev)) + if (intel_display_driver_probe_defer(pdev)) return -EPROBE_DEFER; err = i915_driver_probe(pdev, ent); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c4197e31962e..0523418129c5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1343,9 +1343,9 @@ #define SNB_FBC_FRONT_BUFFER REG_BIT(1) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) -#define ILK_FBCQ_DIS (1 << 22) -#define ILK_PABSTRETCH_DIS REG_BIT(21) -#define ILK_SABSTRETCH_DIS REG_BIT(20) +#define ILK_FBCQ_DIS REG_BIT(22) +#define ILK_PABSTRETCH_DIS REG_BIT(21) +#define ILK_SABSTRETCH_DIS REG_BIT(20) #define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20) #define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0) #define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1) @@ -1809,6 +1809,11 @@ #define CLKGATE_DIS_PSL_EXT(pipe) \ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B) +/* DDI Buffer Control */ +#define _DDI_CLK_VALFREQ_A 0x64030 +#define _DDI_CLK_VALFREQ_B 0x64130 +#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B) + /* * Display engine regs */ @@ -1961,15 +1966,6 @@ #define _TRANS_VSYNC_DSI1 0x6b814 #define _TRANS_VSYNCSHIFT_DSI1 0x6b828 -#define TRANSCODER_A_OFFSET 0x60000 -#define TRANSCODER_B_OFFSET 0x61000 -#define TRANSCODER_C_OFFSET 0x62000 -#define CHV_TRANSCODER_C_OFFSET 0x63000 -#define TRANSCODER_D_OFFSET 0x63000 -#define TRANSCODER_EDP_OFFSET 0x6f000 -#define TRANSCODER_DSI0_OFFSET 0x6b000 -#define TRANSCODER_DSI1_OFFSET 0x6b800 - #define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A) #define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A) #define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A) @@ -2332,35 +2328,33 @@ /* Panel fitting */ #define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230) -#define PFIT_ENABLE (1 << 31) -#define PFIT_PIPE_MASK (3 << 29) -#define PFIT_PIPE_SHIFT 29 -#define PFIT_PIPE(pipe) ((pipe) << 29) -#define VERT_INTERP_DISABLE (0 << 10) -#define VERT_INTERP_BILINEAR (1 << 10) -#define VERT_INTERP_MASK (3 << 10) -#define VERT_AUTO_SCALE (1 << 9) -#define HORIZ_INTERP_DISABLE (0 << 6) -#define HORIZ_INTERP_BILINEAR (1 << 6) -#define HORIZ_INTERP_MASK (3 << 6) -#define HORIZ_AUTO_SCALE (1 << 5) -#define PANEL_8TO6_DITHER_ENABLE (1 << 3) -#define PFIT_FILTER_FUZZY (0 << 24) -#define PFIT_SCALING_AUTO (0 << 26) -#define PFIT_SCALING_PROGRAMMED (1 << 26) -#define PFIT_SCALING_PILLAR (2 << 26) -#define PFIT_SCALING_LETTER (3 << 26) +#define PFIT_ENABLE REG_BIT(31) +#define PFIT_PIPE_MASK REG_GENMASK(30, 29) /* 965+ */ +#define PFIT_PIPE(pipe) REG_FIELD_PREP(PFIT_PIPE_MASK, (pipe)) +#define PFIT_SCALING_MASK REG_GENMASK(28, 26) /* 965+ */ +#define PFIT_SCALING_AUTO REG_FIELD_PREP(PFIT_SCALING_MASK, 0) +#define PFIT_SCALING_PROGRAMMED REG_FIELD_PREP(PFIT_SCALING_MASK, 1) +#define PFIT_SCALING_PILLAR REG_FIELD_PREP(PFIT_SCALING_MASK, 2) +#define PFIT_SCALING_LETTER REG_FIELD_PREP(PFIT_SCALING_MASK, 3) +#define PFIT_FILTER_MASK REG_GENMASK(25, 24) /* 965+ */ +#define PFIT_FILTER_FUZZY REG_FIELD_PREP(PFIT_FILTER_MASK, 0) +#define PFIT_FILTER_CRISP REG_FIELD_PREP(PFIT_FILTER_MASK, 1) +#define PFIT_FILTER_MEDIAN 
REG_FIELD_PREP(PFIT_FILTER_MASK, 2) +#define PFIT_VERT_INTERP_MASK REG_GENMASK(11, 10) /* pre-965 */ +#define PFIT_VERT_INTERP_BILINEAR REG_FIELD_PREP(PFIT_VERT_INTERP_MASK, 1) +#define PFIT_VERT_AUTO_SCALE REG_BIT(9) /* pre-965 */ +#define PFIT_HORIZ_INTERP_MASK REG_GENMASK(7, 6) /* pre-965 */ +#define PFIT_HORIZ_INTERP_BILINEAR REG_FIELD_PREP(PFIT_HORIZ_INTERP_MASK, 1) +#define PFIT_HORIZ_AUTO_SCALE REG_BIT(5) /* pre-965 */ +#define PFIT_PANEL_8TO6_DITHER_ENABLE REG_BIT(3) /* pre-965 */ + #define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234) -/* Pre-965 */ -#define PFIT_VERT_SCALE_SHIFT 20 -#define PFIT_VERT_SCALE_MASK 0xfff00000 -#define PFIT_HORIZ_SCALE_SHIFT 4 -#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 -/* 965+ */ -#define PFIT_VERT_SCALE_SHIFT_965 16 -#define PFIT_VERT_SCALE_MASK_965 0x1fff0000 -#define PFIT_HORIZ_SCALE_SHIFT_965 0 -#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff +#define PFIT_VERT_SCALE_MASK REG_GENMASK(31, 20) /* pre-965 */ +#define PFIT_VERT_SCALE(x) REG_FIELD_PREP(PFIT_VERT_SCALE_MASK, (x)) +#define PFIT_HORIZ_SCALE_MASK REG_GENMASK(15, 4) /* pre-965 */ +#define PFIT_HORIZ_SCALE(x) REG_FIELD_PREP(PFIT_HORIZ_SCALE_MASK, (x)) +#define PFIT_VERT_SCALE_MASK_965 REG_GENMASK(28, 16) /* 965+ */ +#define PFIT_HORIZ_SCALE_MASK_965 REG_GENMASK(12, 0) /* 965+ */ #define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238) @@ -2550,6 +2544,7 @@ #define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */ #define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x)) #define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16) +#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */ #define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14) #define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13) #define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ @@ -2619,23 +2614,6 @@ #define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 #define PIPESTAT_INT_STATUS_MASK 0x0000ffff -#define PIPE_A_OFFSET 0x70000 -#define PIPE_B_OFFSET 0x71000 -#define PIPE_C_OFFSET 0x72000 -#define PIPE_D_OFFSET 0x73000 -#define CHV_PIPE_C_OFFSET 0x74000 -/* - * There's actually no pipe EDP. Some pipe registers have - * simply shifted from the pipe to the transcoder, while - * keeping their original offset. Thus we need PIPE_EDP_OFFSET - * to access such registers in transcoder EDP. - */ -#define PIPE_EDP_OFFSET 0x7f000 - -/* ICL DSI 0 and 1 */ -#define PIPE_DSI0_OFFSET 0x7b000 -#define PIPE_DSI1_OFFSET 0x7b800 - #define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF) #define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) #define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH) @@ -2655,8 +2633,13 @@ #define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ #define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ #define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ +#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */ +#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */ +#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */ +#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */ +#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20) #define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11) -#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ +#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ /* * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with * valid values of: 6, 8, 10 BPC. 
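Note on the PFIT_CONTROL/PFIT_PGM_RATIOS conversion above: the new-style definitions build register values with REG_BIT(), REG_GENMASK() and REG_FIELD_PREP() instead of open-coded shifts. The standalone C sketch below is illustration only; the EX_* names are simplified stand-ins written for this note, not the in-tree macros (which additionally enforce compile-time checks via BUILD_BUG_ON_ZERO()). It shows the bit arithmetic involved when composing a panel fitter control value with field helpers of this style.

/*
 * Illustration only: simplified userspace stand-ins for the kernel's
 * REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers.  Field layout mirrors
 * the new-style PFIT_CONTROL definitions in the hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_REG_BIT(n)              ((uint32_t)1 << (n))
#define EX_REG_GENMASK(h, l)       (((~(uint32_t)0) >> (31 - (h))) & ((~(uint32_t)0) << (l)))
#define EX_FIELD_SHIFT(mask)       (__builtin_ctz(mask))
#define EX_REG_FIELD_PREP(mask, v) (((uint32_t)(v) << EX_FIELD_SHIFT(mask)) & (mask))

#define EX_PFIT_ENABLE             EX_REG_BIT(31)
#define EX_PFIT_PIPE_MASK          EX_REG_GENMASK(30, 29)
#define EX_PFIT_PIPE(p)            EX_REG_FIELD_PREP(EX_PFIT_PIPE_MASK, (p))
#define EX_PFIT_SCALING_MASK       EX_REG_GENMASK(28, 26)
#define EX_PFIT_SCALING_AUTO       EX_REG_FIELD_PREP(EX_PFIT_SCALING_MASK, 0)

int main(void)
{
	/* Enable the panel fitter on pipe B (index 1) with automatic scaling */
	uint32_t val = EX_PFIT_ENABLE | EX_PFIT_PIPE(1) | EX_PFIT_SCALING_AUTO;

	/* Prints 0xa0000000: bit 31 set, pipe field (bits 30:29) = 1 */
	printf("PFIT_CONTROL value: 0x%08x\n", (unsigned int)val);
	return 0;
}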
@@ -3091,13 +3074,6 @@ #define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A) #define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE) -#define CURSOR_A_OFFSET 0x70080 -#define CURSOR_B_OFFSET 0x700c0 -#define CHV_CURSOR_C_OFFSET 0x700e0 -#define IVB_CURSOR_B_OFFSET 0x71080 -#define IVB_CURSOR_C_OFFSET 0x72080 -#define TGL_CURSOR_D_OFFSET 0x73080 - /* Display A control */ #define _DSPAADDR_VLV 0x7017C /* vlv/chv */ #define _DSPACNTR 0x70180 @@ -4005,20 +3981,28 @@ /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ -#define _PFA_CTL_1 0x68080 -#define _PFB_CTL_1 0x68880 -#define PF_ENABLE (1 << 31) -#define PF_PIPE_SEL_MASK_IVB (3 << 29) -#define PF_PIPE_SEL_IVB(pipe) ((pipe) << 29) -#define PF_FILTER_MASK (3 << 23) -#define PF_FILTER_PROGRAMMED (0 << 23) -#define PF_FILTER_MED_3x3 (1 << 23) -#define PF_FILTER_EDGE_ENHANCE (2 << 23) -#define PF_FILTER_EDGE_SOFTEN (3 << 23) +#define _PFA_CTL_1 0x68080 +#define _PFB_CTL_1 0x68880 +#define PF_ENABLE REG_BIT(31) +#define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */ +#define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe)) +#define PF_FILTER_MASK REG_GENMASK(24, 23) +#define PF_FILTER_PROGRAMMED REG_FIELD_PREP(PF_FILTER_MASK, 0) +#define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1) +#define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 2) +#define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 3) #define _PFA_WIN_SZ 0x68074 #define _PFB_WIN_SZ 0x68874 +#define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16) +#define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w)) +#define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0) +#define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h)) #define _PFA_WIN_POS 0x68070 #define _PFB_WIN_POS 0x68870 +#define PF_WIN_XPOS_MASK REG_GENMASK(31, 16) +#define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x)) +#define PF_WIN_YPOS_MASK REG_GENMASK(15, 0) +#define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y)) #define _PFA_VSCALE 0x68084 #define _PFB_VSCALE 0x68884 #define _PFA_HSCALE 0x68090 @@ -4030,18 +4014,6 @@ #define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) #define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) -#define _PSA_CTL 0x68180 -#define _PSB_CTL 0x68980 -#define PS_ENABLE (1 << 31) -#define _PSA_WIN_SZ 0x68174 -#define _PSB_WIN_SZ 0x68974 -#define _PSA_WIN_POS 0x68170 -#define _PSB_WIN_POS 0x68970 - -#define PS_CTL(pipe) _MMIO_PIPE(pipe, _PSA_CTL, _PSB_CTL) -#define PS_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ) -#define PS_WIN_POS(pipe) _MMIO_PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS) - /* * Skylake scalers */ @@ -4050,63 +4022,89 @@ #define _PS_1B_CTRL 0x68980 #define _PS_2B_CTRL 0x68A80 #define _PS_1C_CTRL 0x69180 -#define PS_SCALER_EN (1 << 31) -#define SKL_PS_SCALER_MODE_MASK (3 << 28) -#define SKL_PS_SCALER_MODE_DYN (0 << 28) -#define SKL_PS_SCALER_MODE_HQ (1 << 28) -#define SKL_PS_SCALER_MODE_NV12 (2 << 28) -#define PS_SCALER_MODE_PLANAR (1 << 29) -#define PS_SCALER_MODE_NORMAL (0 << 29) -#define PS_PLANE_SEL_MASK (7 << 25) -#define PS_PLANE_SEL(plane) (((plane) + 1) << 25) -#define PS_FILTER_MASK (3 << 23) -#define PS_FILTER_MEDIUM (0 << 23) -#define PS_FILTER_PROGRAMMED (1 << 23) -#define PS_FILTER_EDGE_ENHANCE (2 << 23) -#define PS_FILTER_BILINEAR (3 << 23) -#define PS_VERT3TAP (1 << 21) -#define PS_VERT_INT_INVERT_FIELD1 (0 << 20) -#define PS_VERT_INT_INVERT_FIELD0 (1 << 20) -#define PS_PWRUP_PROGRESS (1 << 17) -#define PS_V_FILTER_BYPASS (1 << 8) 
-#define PS_VADAPT_EN (1 << 7) -#define PS_VADAPT_MODE_MASK (3 << 5) -#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5) -#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5) -#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5) -#define PS_PLANE_Y_SEL_MASK (7 << 5) -#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5) -#define PS_Y_VERT_FILTER_SELECT(set) ((set) << 4) -#define PS_Y_HORZ_FILTER_SELECT(set) ((set) << 3) -#define PS_UV_VERT_FILTER_SELECT(set) ((set) << 2) -#define PS_UV_HORZ_FILTER_SELECT(set) ((set) << 1) +#define PS_SCALER_EN REG_BIT(31) +#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */ +#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0) +#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1) +#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */ +#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0) +#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1) +#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2) +#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */ +#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0) +#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1) +#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */ +#define PS_BINDING_MASK REG_GENMASK(27, 25) +#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0) +#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1) +#define PS_FILTER_MASK REG_GENMASK(24, 23) +#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0) +#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1) +#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2) +#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3) +#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */ +#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0) +#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1) +#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */ +#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC REG_FIELD_PREP(PS_SCALER_LOCATION_MASK, 0) /* non-linear */ +#define PS_PIPE_SCALER_LOC_AFTER_CSC REG_FIELD_PREP(PS_SCALER_LOCATION_MASK, 1) /* linear */ +#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */ +#define PS_VERT_INT_INVERT_FIELD REG_BIT(20) +#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */ +#define PS_PWRUP_PROGRESS REG_BIT(17) +#define PS_V_FILTER_BYPASS REG_BIT(8) +#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */ +#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */ +#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0) +#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1) +#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3) +#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */ +#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1) +#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */ +#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set)) +#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */ +#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set)) +#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */ +#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set)) +#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */ +#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set)) #define 
_PS_PWR_GATE_1A 0x68160 #define _PS_PWR_GATE_2A 0x68260 #define _PS_PWR_GATE_1B 0x68960 #define _PS_PWR_GATE_2B 0x68A60 #define _PS_PWR_GATE_1C 0x69160 -#define PS_PWR_GATE_DIS_OVERRIDE (1 << 31) -#define PS_PWR_GATE_SETTLING_TIME_32 (0 << 3) -#define PS_PWR_GATE_SETTLING_TIME_64 (1 << 3) -#define PS_PWR_GATE_SETTLING_TIME_96 (2 << 3) -#define PS_PWR_GATE_SETTLING_TIME_128 (3 << 3) -#define PS_PWR_GATE_SLPEN_8 0 -#define PS_PWR_GATE_SLPEN_16 1 -#define PS_PWR_GATE_SLPEN_24 2 -#define PS_PWR_GATE_SLPEN_32 3 +#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31) +#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3) +#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0) +#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1) +#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2) +#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3) +#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0) +#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0) +#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1) +#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2) +#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3) #define _PS_WIN_POS_1A 0x68170 #define _PS_WIN_POS_2A 0x68270 #define _PS_WIN_POS_1B 0x68970 #define _PS_WIN_POS_2B 0x68A70 #define _PS_WIN_POS_1C 0x69170 +#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16) +#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x)) +#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0) +#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y)) #define _PS_WIN_SZ_1A 0x68174 #define _PS_WIN_SZ_2A 0x68274 #define _PS_WIN_SZ_1B 0x68974 #define _PS_WIN_SZ_2B 0x68A74 #define _PS_WIN_SZ_1C 0x69174 +#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16) +#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w)) +#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0) +#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h)) #define _PS_VSCALE_1A 0x68184 #define _PS_VSCALE_2A 0x68284 @@ -4125,10 +4123,12 @@ #define _PS_VPHASE_1B 0x68988 #define _PS_VPHASE_2B 0x68A88 #define _PS_VPHASE_1C 0x69188 -#define PS_Y_PHASE(x) ((x) << 16) -#define PS_UV_RGB_PHASE(x) ((x) << 0) -#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */ -#define PS_PHASE_TRIP (1 << 0) +#define PS_Y_PHASE_MASK REG_GENMASK(31, 16) +#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x)) +#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0) +#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x)) +#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */ +#define PS_PHASE_TRIP (1 << 0) #define _PS_HPHASE_1A 0x68194 #define _PS_HPHASE_2A 0x68294 @@ -4146,7 +4146,7 @@ #define _PS_COEF_SET0_INDEX_2A 0x68298 #define _PS_COEF_SET0_INDEX_1B 0x68998 #define _PS_COEF_SET0_INDEX_2B 0x68A98 -#define PS_COEE_INDEX_AUTO_INC (1 << 10) +#define PS_COEF_INDEX_AUTO_INC REG_BIT(10) #define _PS_COEF_SET0_DATA_1A 0x6819C #define _PS_COEF_SET0_DATA_2A 0x6829C @@ -4482,56 +4482,79 @@ #define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4)) #define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4)) +#define PICAINTERRUPT_ISR _MMIO(0x16FE50) +#define PICAINTERRUPT_IMR _MMIO(0x16FE54) +#define PICAINTERRUPT_IIR _MMIO(0x16FE58) +#define PICAINTERRUPT_IER _MMIO(0x16FE5C) + +#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin)) +#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16) + +#define 
XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin)) +#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8) + +#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin)) +#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0) + +#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200)) +#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6) +#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5) +#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4) +#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2) +#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1) +#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0) + #define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) /* Required on all Ironlake and Sandybridge according to the B-Spec. */ -#define ILK_ELPIN_409_SELECT (1 << 25) -#define ILK_DPARB_GATE (1 << 22) -#define ILK_VSDPFD_FULL (1 << 21) -#define FUSE_STRAP _MMIO(0x42014) -#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) -#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) -#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) -#define IVB_PIPE_C_DISABLE (1 << 28) -#define ILK_HDCP_DISABLE (1 << 25) -#define ILK_eDP_A_DISABLE (1 << 24) -#define HSW_CDCLK_LIMIT (1 << 24) -#define ILK_DESKTOP (1 << 23) -#define HSW_CPU_SSC_ENABLE (1 << 21) - -#define FUSE_STRAP3 _MMIO(0x42020) -#define HSW_REF_CLK_SELECT (1 << 1) - -#define ILK_DSPCLK_GATE_D _MMIO(0x42020) -#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) -#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) -#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) -#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) -#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) - -#define IVB_CHICKEN3 _MMIO(0x4200c) -# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) -# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) - -#define CHICKEN_PAR1_1 _MMIO(0x42080) -#define IGNORE_KVMR_PIPE_A REG_BIT(23) -#define KBL_ARB_FILL_SPARE_22 REG_BIT(22) -#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16) -#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15) -#define DPA_MASK_VBLANK_SRD (1 << 15) -#define FORCE_ARB_IDLE_PLANES (1 << 14) -#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) -#define IGNORE_PSR2_HW_TRACKING (1 << 1) +#define ILK_ELPIN_409_SELECT REG_BIT(25) +#define ILK_DPARB_GATE REG_BIT(22) +#define ILK_VSDPFD_FULL REG_BIT(21) + +#define FUSE_STRAP _MMIO(0x42014) +#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31) +#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30) +#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29) +#define IVB_PIPE_C_DISABLE REG_BIT(28) +#define ILK_HDCP_DISABLE REG_BIT(25) +#define ILK_eDP_A_DISABLE REG_BIT(24) +#define HSW_CDCLK_LIMIT REG_BIT(24) +#define ILK_DESKTOP REG_BIT(23) +#define HSW_CPU_SSC_ENABLE REG_BIT(21) + +#define FUSE_STRAP3 _MMIO(0x42020) +#define HSW_REF_CLK_SELECT REG_BIT(1) + +#define ILK_DSPCLK_GATE_D _MMIO(0x42020) +#define ILK_VRHUNIT_CLOCK_GATE_DISABLE REG_BIT(28) +#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE REG_BIT(9) +#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE REG_BIT(8) +#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE REG_BIT(7) +#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE REG_BIT(5) + +#define IVB_CHICKEN3 _MMIO(0x4200c) +#define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE REG_BIT(5) +#define CHICKEN3_DGMG_DONE_FIX_DISABLE REG_BIT(2) + +#define CHICKEN_PAR1_1 _MMIO(0x42080) +#define IGNORE_KVMR_PIPE_A REG_BIT(23) +#define KBL_ARB_FILL_SPARE_22 REG_BIT(22) +#define DIS_RAM_BYPASS_PSR2_MAN_TRACK REG_BIT(16) +#define SKL_DE_COMPRESSED_HASH_MODE REG_BIT(15) +#define HSW_MASK_VBL_TO_PIPE_IN_SRD REG_BIT(15) /* hsw/bdw */ +#define FORCE_ARB_IDLE_PLANES REG_BIT(14) +#define SKL_EDP_PSR_FIX_RDWRAP REG_BIT(3) +#define 
IGNORE_PSR2_HW_TRACKING REG_BIT(1) #define CHICKEN_PAR2_1 _MMIO(0x42090) -#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14) +#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT REG_BIT(14) #define CHICKEN_MISC_2 _MMIO(0x42084) -#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */ -#define KBL_ARB_FILL_SPARE_14 REG_BIT(14) -#define KBL_ARB_FILL_SPARE_13 REG_BIT(13) -#define GLK_CL2_PWR_DOWN (1 << 12) -#define GLK_CL1_PWR_DOWN (1 << 11) -#define GLK_CL0_PWR_DOWN (1 << 10) +#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */ +#define KBL_ARB_FILL_SPARE_14 REG_BIT(14) +#define KBL_ARB_FILL_SPARE_13 REG_BIT(13) +#define GLK_CL2_PWR_DOWN REG_BIT(12) +#define GLK_CL1_PWR_DOWN REG_BIT(11) +#define GLK_CL0_PWR_DOWN REG_BIT(10) #define CHICKEN_MISC_4 _MMIO(0x4208c) #define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13) @@ -4540,24 +4563,26 @@ #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 -#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27) -#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0) -#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1) -#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2) -#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3) -#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25) -#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0) -#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1) -#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2) -#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3) -#define HSW_FBCQ_DIS (1 << 22) -#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) -#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0) -#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0) -#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1) -#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2) -#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3) -#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) +#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) +#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27) +#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0) +#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1) +#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2) +#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3) +#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25) +#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0) +#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1) +#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2) +#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3) +#define HSW_FBCQ_DIS REG_BIT(22) +#define HSW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(15) /* hsw */ +#define SKL_PSR_MASK_PLANE_FLIP REG_BIT(11) /* skl+ */ +#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0) +#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0) +#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1) +#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2) +#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3) +#define 
BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */ #define _CHICKEN_TRANS_A 0x420c0 #define _CHICKEN_TRANS_B 0x420c4 @@ -4570,32 +4595,33 @@ [TRANSCODER_B] = _CHICKEN_TRANS_B, \ [TRANSCODER_C] = _CHICKEN_TRANS_C, \ [TRANSCODER_D] = _CHICKEN_TRANS_D)) - #define _MTL_CHICKEN_TRANS_A 0x604e0 #define _MTL_CHICKEN_TRANS_B 0x614e0 #define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \ _MTL_CHICKEN_TRANS_A, \ _MTL_CHICKEN_TRANS_B) - -#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) -#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x) -#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */ -#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23) -#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19) -#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18) -#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18) -#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */ -#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */ -#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15) -#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12) +#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */ +#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */ +#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) +#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x) +#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */ +#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23) +#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19) +#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18) +#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18) +#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */ +#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */ +#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15) +#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12) #define DISP_ARB_CTL _MMIO(0x45000) -#define DISP_FBC_MEMORY_WAKE (1 << 31) -#define DISP_TILE_SURFACE_SWIZZLING (1 << 13) -#define DISP_FBC_WM_DIS (1 << 15) +#define DISP_FBC_MEMORY_WAKE REG_BIT(31) +#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13) +#define DISP_FBC_WM_DIS REG_BIT(15) + #define DISP_ARB_CTL2 _MMIO(0x45004) -#define DISP_DATA_PARTITION_5_6 (1 << 6) -#define DISP_IPC_ENABLE (1 << 3) +#define DISP_DATA_PARTITION_5_6 REG_BIT(6) +#define DISP_IPC_ENABLE REG_BIT(3) #define GEN7_MSG_CTL _MMIO(0x45010) #define WAIT_FOR_PCH_RESET_ACK (1 << 1) @@ -4768,7 +4794,8 @@ SDE_FDI_RXB_CPT | \ SDE_FDI_RXA_CPT) -/* south display engine interrupt: ICP/TGP */ +/* south display engine interrupt: ICP/TGP/MTP */ +#define SDE_PICAINTERRUPT REG_BIT(31) #define SDE_GMBUS_ICP (1 << 23) #define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin)) #define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */ @@ -5104,24 +5131,32 @@ #define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1) #define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2) #define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3) + #define _TRANSA_CHICKEN1 0xf0060 #define _TRANSB_CHICKEN1 0xf1060 #define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) -#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1 << 10) -#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1 << 4) +#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE REG_BIT(10) +#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE REG_BIT(4) + #define _TRANSA_CHICKEN2 0xf0064 #define _TRANSB_CHICKEN2 0xf1064 #define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) -#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31) -#define 
TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29) -#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27) -#define TRANS_CHICKEN2_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */ -#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26) -#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25) +#define TRANS_CHICKEN2_TIMING_OVERRIDE REG_BIT(31) +#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED REG_BIT(29) +#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) +#define TRANS_CHICKEN2_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_CHICKEN2_FRAME_START_DELAY_MASK, (x)) /* 0-3 */ +#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER REG_BIT(26) +#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH REG_BIT(25) #define SOUTH_CHICKEN1 _MMIO(0xc2000) #define FDIA_PHASE_SYNC_SHIFT_OVR 19 #define FDIA_PHASE_SYNC_SHIFT_EN 18 +#define INVERT_DDIE_HPD REG_BIT(28) +#define INVERT_DDID_HPD_MTP REG_BIT(27) +#define INVERT_TC4_HPD REG_BIT(26) +#define INVERT_TC3_HPD REG_BIT(25) +#define INVERT_TC2_HPD REG_BIT(24) +#define INVERT_TC1_HPD REG_BIT(23) #define INVERT_DDID_HPD (1 << 18) #define INVERT_DDIC_HPD (1 << 17) #define INVERT_DDIB_HPD (1 << 16) @@ -5303,6 +5338,20 @@ #define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8) #define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8)) #define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8)) +#define DISPLAY_TO_PCODE_CDCLK_MAX 0x28D +#define DISPLAY_TO_PCODE_VOLTAGE_MASK REG_GENMASK(1, 0) +#define DISPLAY_TO_PCODE_VOLTAGE_MAX DISPLAY_TO_PCODE_VOLTAGE_MASK +#define DISPLAY_TO_PCODE_CDCLK_VALID REG_BIT(27) +#define DISPLAY_TO_PCODE_PIPE_COUNT_VALID REG_BIT(31) +#define DISPLAY_TO_PCODE_CDCLK_MASK REG_GENMASK(25, 16) +#define DISPLAY_TO_PCODE_PIPE_COUNT_MASK REG_GENMASK(30, 28) +#define DISPLAY_TO_PCODE_CDCLK(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_CDCLK_MASK, (x)) +#define DISPLAY_TO_PCODE_PIPE_COUNT(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_PIPE_COUNT_MASK, (x)) +#define DISPLAY_TO_PCODE_VOLTAGE(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_VOLTAGE_MASK, (x)) +#define DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, num_pipes, voltage_level) \ + ((DISPLAY_TO_PCODE_CDCLK(cdclk)) | \ + (DISPLAY_TO_PCODE_PIPE_COUNT(num_pipes)) | \ + (DISPLAY_TO_PCODE_VOLTAGE(voltage_level))) #define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe #define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0) #define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0) @@ -5563,6 +5612,8 @@ enum skl_power_gate { #define TRANS_DDI_HDCP_SELECT REG_BIT(5) #define TRANS_DDI_BFI_ENABLE (1 << 4) #define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4) +#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1) +#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1) #define TRANS_DDI_HDMI_SCRAMBLING (1 << 0) #define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ @@ -5622,11 +5673,16 @@ enum skl_power_gate { /* DDI Buffer Control */ #define _DDI_BUF_CTL_A 0x64000 #define _DDI_BUF_CTL_B 0x64100 +/* Known as DDI_CTL_DE in MTL+ */ #define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B) #define DDI_BUF_CTL_ENABLE (1 << 31) #define DDI_BUF_TRANS_SELECT(n) ((n) << 24) #define DDI_BUF_EMP_MASK (0xf << 24) #define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20) +#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18) +#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0) +#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1) +#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 
2) #define DDI_BUF_PORT_REVERSAL (1 << 16) #define DDI_BUF_IS_IDLE (1 << 7) #define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6) @@ -6383,6 +6439,20 @@ enum skl_power_gate { (index) * 4, _PLANE_CSC_POSTOFF_HI_2(pipe) + \ (index) * 4) +#define _PIPE_A_WGC_C01_C00 0x600B0 /* s2.10 */ +#define _PIPE_A_WGC_C02 0x600B4 /* s2.10 */ +#define _PIPE_A_WGC_C11_C10 0x600B8 /* s2.10 */ +#define _PIPE_A_WGC_C12 0x600BC /* s2.10 */ +#define _PIPE_A_WGC_C21_C20 0x600C0 /* s2.10 */ +#define _PIPE_A_WGC_C22 0x600C4 /* s2.10 */ + +#define PIPE_WGC_C01_C00(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C01_C00) +#define PIPE_WGC_C02(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C02) +#define PIPE_WGC_C11_C10(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C11_C10) +#define PIPE_WGC_C12(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C12) +#define PIPE_WGC_C21_C20(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C21_C20) +#define PIPE_WGC_C22(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C22) + /* pipe CSC & degamma/gamma LUTs on CHV */ #define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900) #define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904) diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h index db26de6b57bc..a685db1e815d 100644 --- a/drivers/gpu/drm/i915/i915_reg_defs.h +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -23,6 +23,19 @@ ((__n) < 0 || (__n) > 31)))) /** + * REG_BIT8() - Prepare a u8 bit value + * @__n: 0-based bit number + * + * Local wrapper for BIT() to force u8, with compile time checks. + * + * @return: Value with bit @__n set. + */ +#define REG_BIT8(__n) \ + ((u8)(BIT(__n) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ + ((__n) < 0 || (__n) > 7)))) + +/** * REG_GENMASK() - Prepare a continuous u32 bitmask * @__high: 0-based high bit * @__low: 0-based low bit @@ -52,6 +65,21 @@ __is_constexpr(__low) && \ ((__low) < 0 || (__high) > 63 || (__low) > (__high))))) +/** + * REG_GENMASK8() - Prepare a continuous u8 bitmask + * @__high: 0-based high bit + * @__low: 0-based low bit + * + * Local wrapper for GENMASK() to force u8, with compile time checks. + * + * @return: Continuous bitmask from @__high to @__low, inclusive. + */ +#define REG_GENMASK8(__high, __low) \ + ((u8)(GENMASK(__high, __low) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ + __is_constexpr(__low) && \ + ((__low) < 0 || (__high) > 7 || (__low) > (__high))))) + /* * Local integer constant expression version of is_power_of_2(). */ @@ -75,6 +103,23 @@ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) /** + * REG_FIELD_PREP8() - Prepare a u8 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to put in the field + * + * Local copy of FIELD_PREP() to generate an integer constant expression, force + * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8(). + * + * @return: @__val masked and shifted into the field defined by @__mask. 
+ */ +#define REG_FIELD_PREP8(__mask, __val) \ + ((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ + BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ + BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) + \ + BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ + BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) + +/** * REG_FIELD_GET() - Extract a u32 bitfield value * @__mask: shifted mask defining the field's length and position * @__val: value to extract the bitfield value from @@ -98,6 +143,54 @@ */ #define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val)) +/** + * REG_BIT16() - Prepare a u16 bit value + * @__n: 0-based bit number + * + * Local wrapper for BIT() to force u16, with compile time + * checks. + * + * @return: Value with bit @__n set. + */ +#define REG_BIT16(__n) \ + ((u16)(BIT(__n) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ + ((__n) < 0 || (__n) > 15)))) + +/** + * REG_GENMASK16() - Prepare a continuous u8 bitmask + * @__high: 0-based high bit + * @__low: 0-based low bit + * + * Local wrapper for GENMASK() to force u16, with compile time + * checks. + * + * @return: Continuous bitmask from @__high to @__low, inclusive. + */ +#define REG_GENMASK16(__high, __low) \ + ((u16)(GENMASK(__high, __low) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ + __is_constexpr(__low) && \ + ((__low) < 0 || (__high) > 15 || (__low) > (__high))))) + +/** + * REG_FIELD_PREP16() - Prepare a u16 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to put in the field + * + * Local copy of FIELD_PREP16() to generate an integer constant + * expression, force u8 and for consistency with + * REG_FIELD_GET16(), REG_BIT16() and REG_GENMASK16(). + * + * @return: @__val masked and shifted into the field defined by @__mask. + */ +#define REG_FIELD_PREP16(__mask, __val) \ + ((u16)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ + BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ + BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U16_MAX) + \ + BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ + BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) + #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value)) #define _MASKED_FIELD(mask, value) ({ \ if (__builtin_constant_p(mask)) \ @@ -155,6 +248,18 @@ */ #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index]) +/** + * REG_FIELD_GET8() - Extract a u8 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to extract the bitfield value from + * + * Local wrapper for FIELD_GET() to force u8 and for consistency with + * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK(). + * + * @return: Masked and shifted value of the field defined by @__mask in @__val. 
+ */ +#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val)) + typedef struct { u32 reg; } i915_reg_t; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a814775a363d..ffb425ba591c 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1710,6 +1710,8 @@ static void release_references(struct i915_vma *vma, struct intel_gt *gt, if (vm_ddestroy) i915_vm_resv_put(vma->vm); + /* Wait for async active retire */ + i915_active_wait(&vma->active); i915_active_fini(&vma->active); GEM_WARN_ON(vma->resource); i915_vma_free(vma); diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c index 2c5302bcba19..a27600bc5976 100644 --- a/drivers/gpu/drm/i915/intel_clock_gating.c +++ b/drivers/gpu/drm/i915/intel_clock_gating.c @@ -36,6 +36,7 @@ #include "gt/intel_gt_regs.h" #include "i915_drv.h" +#include "i915_reg.h" #include "intel_clock_gating.h" #include "intel_mchbar_regs.h" #include "vlv_sideband.h" @@ -520,12 +521,12 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915) intel_uncore_rmw(&i915->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL); /* WaPsrDPAMaskVBlankInSRD:bdw */ - intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, DPA_MASK_VBLANK_SRD); + intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD); for_each_pipe(i915, pipe) { /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe), - 0, BDW_DPRS_MASK_VBLANK_SRD); + 0, BDW_UNMASK_VBL_TO_REGS_IN_SRD); } /* WaVSRefCountFullforceMissDisable:bdw */ diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index fc5cd14adfcc..2f79d232b04a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -95,6 +95,9 @@ void intel_device_info_print(const struct intel_device_info *info, const struct intel_runtime_info *runtime, struct drm_printer *p) { + const struct intel_display_runtime_info *display_runtime = + &info->display->__runtime_defaults; + if (runtime->graphics.ip.rel) drm_printf(p, "graphics version: %u.%02u\n", runtime->graphics.ip.ver, @@ -111,13 +114,13 @@ void intel_device_info_print(const struct intel_device_info *info, drm_printf(p, "media version: %u\n", runtime->media.ip.ver); - if (runtime->display.ip.rel) + if (display_runtime->ip.rel) drm_printf(p, "display version: %u.%02u\n", - runtime->display.ip.ver, - runtime->display.ip.rel); + display_runtime->ip.ver, + display_runtime->ip.rel); else drm_printf(p, "display version: %u\n", - runtime->display.ip.ver); + display_runtime->ip.ver); drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step)); drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step)); @@ -138,13 +141,13 @@ void intel_device_info_print(const struct intel_device_info *info, drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu)); -#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name)) +#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display->name)) DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG - drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp)); - drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc)); - drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc)); + drm_printf(p, "has_hdcp: %s\n", str_yes_no(display_runtime->has_hdcp)); + drm_printf(p, "has_dmc: %s\n", 
str_yes_no(display_runtime->has_dmc)); + drm_printf(p, "has_dsc: %s\n", str_yes_no(display_runtime->has_dsc)); drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq); } @@ -362,8 +365,6 @@ static void intel_ipver_early_init(struct drm_i915_private *i915) RUNTIME_INFO(i915)->graphics.ip.ver = 12; RUNTIME_INFO(i915)->graphics.ip.rel = 70; } - ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_DISPLAY), - &runtime->display.ip); ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_MEDIA), &runtime->media.ip); } @@ -381,6 +382,15 @@ void intel_device_info_runtime_init_early(struct drm_i915_private *i915) intel_device_info_subplatform_init(i915); } +/* FIXME: Remove this, and make device info a const pointer to rodata. */ +static struct intel_device_info * +mkwrite_device_info(struct drm_i915_private *i915) +{ + return (struct intel_device_info *)INTEL_INFO(i915); +} + +static const struct intel_display_device_info no_display = {}; + /** * intel_device_info_runtime_init - initialize runtime info * @dev_priv: the i915 device @@ -401,32 +411,34 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv); + struct intel_display_runtime_info *display_runtime = + DISPLAY_RUNTIME_INFO(dev_priv); enum pipe pipe; /* Wa_14011765242: adl-s A0,A1 */ if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A2)) for_each_pipe(dev_priv, pipe) - runtime->num_scalers[pipe] = 0; + display_runtime->num_scalers[pipe] = 0; else if (DISPLAY_VER(dev_priv) >= 11) { for_each_pipe(dev_priv, pipe) - runtime->num_scalers[pipe] = 2; + display_runtime->num_scalers[pipe] = 2; } else if (DISPLAY_VER(dev_priv) >= 9) { - runtime->num_scalers[PIPE_A] = 2; - runtime->num_scalers[PIPE_B] = 2; - runtime->num_scalers[PIPE_C] = 1; + display_runtime->num_scalers[PIPE_A] = 2; + display_runtime->num_scalers[PIPE_B] = 2; + display_runtime->num_scalers[PIPE_C] = 1; } BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) for_each_pipe(dev_priv, pipe) - runtime->num_sprites[pipe] = 4; + display_runtime->num_sprites[pipe] = 4; else if (DISPLAY_VER(dev_priv) >= 11) for_each_pipe(dev_priv, pipe) - runtime->num_sprites[pipe] = 6; + display_runtime->num_sprites[pipe] = 6; else if (DISPLAY_VER(dev_priv) == 10) for_each_pipe(dev_priv, pipe) - runtime->num_sprites[pipe] = 3; + display_runtime->num_sprites[pipe] = 3; else if (IS_BROXTON(dev_priv)) { /* * Skylake and Broxton currently don't expose the topmost plane as its @@ -437,15 +449,15 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) * down the line. 
*/ - runtime->num_sprites[PIPE_A] = 2; - runtime->num_sprites[PIPE_B] = 2; - runtime->num_sprites[PIPE_C] = 1; + display_runtime->num_sprites[PIPE_A] = 2; + display_runtime->num_sprites[PIPE_B] = 2; + display_runtime->num_sprites[PIPE_C] = 1; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { for_each_pipe(dev_priv, pipe) - runtime->num_sprites[pipe] = 2; + display_runtime->num_sprites[pipe] = 2; } else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) { for_each_pipe(dev_priv, pipe) - runtime->num_sprites[pipe] = 1; + display_runtime->num_sprites[pipe] = 1; } if (HAS_DISPLAY(dev_priv) && @@ -453,7 +465,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) { drm_info(&dev_priv->drm, "Display not present, disabling\n"); - runtime->pipe_mask = 0; + display_runtime->pipe_mask = 0; } if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) && @@ -476,47 +488,47 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { drm_info(&dev_priv->drm, "Display fused off, disabling\n"); - runtime->pipe_mask = 0; + display_runtime->pipe_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); - runtime->pipe_mask &= ~BIT(PIPE_C); - runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + display_runtime->pipe_mask &= ~BIT(PIPE_C); + display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { - runtime->pipe_mask &= ~BIT(PIPE_A); - runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); - runtime->fbc_mask &= ~BIT(INTEL_FBC_A); + display_runtime->pipe_mask &= ~BIT(PIPE_A); + display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + display_runtime->fbc_mask &= ~BIT(INTEL_FBC_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { - runtime->pipe_mask &= ~BIT(PIPE_B); - runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); + display_runtime->pipe_mask &= ~BIT(PIPE_B); + display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); } if (dfsm & SKL_DFSM_PIPE_C_DISABLE) { - runtime->pipe_mask &= ~BIT(PIPE_C); - runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + display_runtime->pipe_mask &= ~BIT(PIPE_C); + display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } if (DISPLAY_VER(dev_priv) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { - runtime->pipe_mask &= ~BIT(PIPE_D); - runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); + display_runtime->pipe_mask &= ~BIT(PIPE_D); + display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); } if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) - runtime->has_hdcp = 0; + display_runtime->has_hdcp = 0; if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - runtime->fbc_mask = 0; + display_runtime->fbc_mask = 0; if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) - runtime->has_dmc = 0; + display_runtime->has_dmc = 0; if (IS_DISPLAY_VER(dev_priv, 10, 12) && (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE)) - runtime->has_dsc = 0; + display_runtime->has_dsc = 0; } if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) { @@ -531,15 +543,15 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) { dev_priv->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); - memset(&info->display, 0, sizeof(info->display)); - - runtime->cpu_transcoder_mask = 0; - memset(runtime->num_sprites, 0, 
sizeof(runtime->num_sprites)); - memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers)); - runtime->fbc_mask = 0; - runtime->has_hdcp = false; - runtime->has_dmc = false; - runtime->has_dsc = false; + info->display = &no_display; + + display_runtime->cpu_transcoder_mask = 0; + memset(display_runtime->num_sprites, 0, sizeof(display_runtime->num_sprites)); + memset(display_runtime->num_scalers, 0, sizeof(display_runtime->num_scalers)); + display_runtime->fbc_mask = 0; + display_runtime->has_hdcp = false; + display_runtime->has_dmc = false; + display_runtime->has_dsc = false; } /* Disable nuclear pageflip by default on pre-g4x */ @@ -548,6 +560,43 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; } +/* + * Set up device info and initial runtime info at driver create. + * + * Note: i915 is only an allocated blob of memory at this point. + */ +void intel_device_info_driver_create(struct drm_i915_private *i915, + u16 device_id, + const struct intel_device_info *match_info) +{ + struct intel_device_info *info; + struct intel_runtime_info *runtime; + u16 ver, rel, step; + + /* Setup the write-once "constant" device info */ + info = mkwrite_device_info(i915); + memcpy(info, match_info, sizeof(*info)); + + /* Initialize initial runtime info from static const data and pdev. */ + runtime = RUNTIME_INFO(i915); + memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime)); + + /* Probe display support */ + info->display = intel_display_device_probe(i915, info->has_gmd_id, + &ver, &rel, &step); + memcpy(DISPLAY_RUNTIME_INFO(i915), + &DISPLAY_INFO(i915)->__runtime_defaults, + sizeof(*DISPLAY_RUNTIME_INFO(i915))); + + if (info->has_gmd_id) { + DISPLAY_RUNTIME_INFO(i915)->ip.ver = ver; + DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel; + DISPLAY_RUNTIME_INFO(i915)->ip.step = step; + } + + runtime->device_id = device_id; +} + void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p) { diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index d737dd601a6e..069291b3bd37 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -29,7 +29,7 @@ #include "intel_step.h" -#include "display/intel_display_limits.h" +#include "display/intel_display_device.h" #include "gt/intel_engine_types.h" #include "gt/intel_context_types.h" @@ -182,25 +182,6 @@ enum intel_ppgtt_type { func(unfenced_needs_alignment); \ func(hws_needs_physical); -#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \ - /* Keep in alphabetical order */ \ - func(cursor_needs_physical); \ - func(has_cdclk_crawl); \ - func(has_cdclk_squash); \ - func(has_ddi); \ - func(has_dp_mst); \ - func(has_dsb); \ - func(has_fpga_dbg); \ - func(has_gmch); \ - func(has_hotplug); \ - func(has_hti); \ - func(has_ipc); \ - func(has_overlay); \ - func(has_psr); \ - func(has_psr_hw_tracking); \ - func(overlay_needs_physical); \ - func(supports_tv); - struct intel_ip_version { u8 ver; u8 rel; @@ -218,9 +199,6 @@ struct intel_runtime_info { struct { struct intel_ip_version ip; } media; - struct { - struct intel_ip_version ip; - } display; /* * Platform mask is used for optimizing or-ed IS_PLATFORM calls into @@ -248,21 +226,6 @@ struct intel_runtime_info { u32 memory_regions; /* regions supported by the HW */ bool has_pooled_eu; - - /* display */ - struct { - u8 pipe_mask; - u8 cpu_transcoder_mask; - - u8 num_sprites[I915_MAX_PIPES]; - u8 num_scalers[I915_MAX_PIPES]; - - u8 fbc_mask; - 
- bool has_hdcp; - bool has_dmc; - bool has_dsc; - }; }; struct intel_device_info { @@ -278,33 +241,7 @@ struct intel_device_info { DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); #undef DEFINE_FLAG - struct { - u8 abox_mask; - - struct { - u16 size; /* in blocks */ - u8 slice_mask; - } dbuf; - -#define DEFINE_FLAG(name) u8 name:1 - DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); -#undef DEFINE_FLAG - - /* Global register offset for the display engine */ - u32 mmio_offset; - - /* Register offsets for the various display pipes and transcoders */ - u32 pipe_offsets[I915_MAX_TRANSCODERS]; - u32 trans_offsets[I915_MAX_TRANSCODERS]; - u32 cursor_offsets[I915_MAX_PIPES]; - - struct { - u32 degamma_lut_size; - u32 gamma_lut_size; - u32 degamma_lut_tests; - u32 gamma_lut_tests; - } color; - } display; + const struct intel_display_device_info *display; /* * Initial runtime info. Do not access outside of i915_driver_create(). @@ -322,6 +259,8 @@ struct intel_driver_caps { const char *intel_platform_name(enum intel_platform platform); +void intel_device_info_driver_create(struct drm_i915_private *i915, u16 device_id, + const struct intel_device_info *match_info); void intel_device_info_runtime_init_early(struct drm_i915_private *dev_priv); void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index e592e8d6499a..764b183ae452 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -16,12 +16,6 @@ struct device; struct drm_i915_private; struct drm_printer; -enum i915_drm_suspend_mode { - I915_DRM_SUSPEND_IDLE, - I915_DRM_SUSPEND_MEM, - I915_DRM_SUSPEND_HIBERNATE, -}; - /* * This struct helps tracking the state needed for runtime PM, which puts the * device in PCI D3 state. 
Notice that when this happens, nothing on the diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index 84a6fe736a3b..8a9ff6227e53 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -166,8 +166,12 @@ void intel_step_init(struct drm_i915_private *i915) &RUNTIME_INFO(i915)->graphics.ip); step.media_step = gmd_to_intel_step(i915, &RUNTIME_INFO(i915)->media.ip); - step.display_step = gmd_to_intel_step(i915, - &RUNTIME_INFO(i915)->display.ip); + step.display_step = STEP_A0 + DISPLAY_RUNTIME_INFO(i915)->ip.step; + if (step.display_step >= STEP_FUTURE) { + drm_dbg(&i915->drm, "Using future display steppings\n"); + step.display_step = STEP_FUTURE; + } + RUNTIME_INFO(i915)->step = step; return; diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index 24dde5531423..d4608b220123 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -28,7 +28,7 @@ alloc_empty_config(struct i915_perf *perf) oa_config->perf = perf; kref_init(&oa_config->ref); - strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid)); + strscpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid)); mutex_lock(&perf->metrics_lock); diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c index 85cbfc3413ee..51359cc5ece9 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.c +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -735,13 +735,13 @@ static const struct i915_hdcp_ops mei_hdcp_ops = { static int mei_component_master_bind(struct device *dev) { struct mei_cl_device *cldev = to_mei_cl_device(dev); - struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); + struct i915_hdcp_arbiter *comp_arbiter = mei_cldev_get_drvdata(cldev); int ret; dev_dbg(dev, "%s\n", __func__); - comp_master->ops = &mei_hdcp_ops; - comp_master->hdcp_dev = dev; - ret = component_bind_all(dev, comp_master); + comp_arbiter->ops = &mei_hdcp_ops; + comp_arbiter->hdcp_dev = dev; + ret = component_bind_all(dev, comp_arbiter); if (ret < 0) return ret; @@ -751,10 +751,10 @@ static int mei_component_master_bind(struct device *dev) static void mei_component_master_unbind(struct device *dev) { struct mei_cl_device *cldev = to_mei_cl_device(dev); - struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); + struct i915_hdcp_arbiter *comp_arbiter = mei_cldev_get_drvdata(cldev); dev_dbg(dev, "%s\n", __func__); - component_unbind_all(dev, comp_master); + component_unbind_all(dev, comp_arbiter); } static const struct component_master_ops mei_component_master_ops = { @@ -799,7 +799,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent, static int mei_hdcp_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) { - struct i915_hdcp_master *comp_master; + struct i915_hdcp_arbiter *comp_arbiter; struct component_match *master_match; int ret; @@ -809,8 +809,8 @@ static int mei_hdcp_probe(struct mei_cl_device *cldev, goto enable_err_exit; } - comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL); - if (!comp_master) { + comp_arbiter = kzalloc(sizeof(*comp_arbiter), GFP_KERNEL); + if (!comp_arbiter) { ret = -ENOMEM; goto err_exit; } @@ -823,7 +823,7 @@ static int mei_hdcp_probe(struct mei_cl_device *cldev, goto err_exit; } - mei_cldev_set_drvdata(cldev, comp_master); + mei_cldev_set_drvdata(cldev, comp_arbiter); ret = component_master_add_with_match(&cldev->dev, &mei_component_master_ops, master_match); @@ -836,7 +836,7 @@ 
static int mei_hdcp_probe(struct mei_cl_device *cldev, err_exit: mei_cldev_set_drvdata(cldev, NULL); - kfree(comp_master); + kfree(comp_arbiter); mei_cldev_disable(cldev); enable_err_exit: return ret; @@ -844,11 +844,11 @@ enable_err_exit: static void mei_hdcp_remove(struct mei_cl_device *cldev) { - struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); + struct i915_hdcp_arbiter *comp_arbiter = mei_cldev_get_drvdata(cldev); int ret; component_master_del(&cldev->dev, &mei_component_master_ops); - kfree(comp_master); + kfree(comp_arbiter); mei_cldev_set_drvdata(cldev, NULL); ret = mei_cldev_disable(cldev); diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 32c764fb9cb5..f962e97880b4 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -138,12 +138,7 @@ struct drm_dp_mst_port { * @cached_edid: for DP logical ports - make tiling work by ensuring * that the EDID for all connectors is read immediately. */ - struct edid *cached_edid; - /** - * @has_audio: Tracks whether the sink connector to this port is - * audio-capable. - */ - bool has_audio; + const struct drm_edid *cached_edid; /** * @fec_capable: bool indicating if FEC can be supported up to that @@ -824,7 +819,12 @@ drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); +const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, int link_rate, int link_lane_count); diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h index 8b41edbbabab..fc2104415dcb 100644 --- a/include/drm/display/drm_dsc_helper.h +++ b/include/drm/display/drm_dsc_helper.h @@ -10,10 +10,19 @@ #include <drm/display/drm_dsc.h> +enum drm_dsc_params_type { + DRM_DSC_1_2_444, + DRM_DSC_1_1_PRE_SCR, /* legacy params from DSC 1.1 */ + DRM_DSC_1_2_422, + DRM_DSC_1_2_420, +}; + void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header); int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size); void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp, const struct drm_dsc_config *dsc_cfg); +void drm_dsc_set_rc_buf_thresh(struct drm_dsc_config *vdsc_cfg); +int drm_dsc_setup_rc_params(struct drm_dsc_config *vdsc_cfg, enum drm_dsc_params_type type); int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg); #endif /* _DRM_DSC_HELPER_H_ */ diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index e6478fafa6b0..e143fef07de9 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -659,6 +659,14 @@ struct drm_display_info { bool is_hdmi; /** + * @has_audio: True if the sink supports audio. + * + * This field shall be used instead of calling + * drm_detect_monitor_audio() when possible. + */ + bool has_audio; + + /** * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? 
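Illustration only, not part of the patch: a sketch of how an MST driver could combine the new drm_dp_mst_edid_read() with the @has_audio field documented above, assuming drm_edid_connector_update() fills connector->display_info (including has_audio) from the parsed EDID, which is what the kerneldoc suggests. The function name is made up.

static bool example_mst_sink_has_audio(struct drm_connector *connector,
                                       struct drm_dp_mst_topology_mgr *mgr,
                                       struct drm_dp_mst_port *port)
{
        const struct drm_edid *drm_edid;
        bool has_audio;

        /* struct drm_edid based replacement for the old drm_dp_mst_get_edid() flow */
        drm_edid = drm_dp_mst_edid_read(connector, mgr, port);

        /* Parse the EDID into connector->display_info. */
        drm_edid_connector_update(connector, drm_edid);
        has_audio = connector->display_info.has_audio;

        drm_edid_free(drm_edid);

        return has_audio;
}

Checking display_info.has_audio this way stands in for the drm_detect_monitor_audio() call on the raw struct edid, which is also why the per-port has_audio tracking could be dropped from struct drm_dp_mst_port.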
*/ bool has_hdmi_infoframe; diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 571885d32907..169755d3de19 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -613,6 +613,8 @@ const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector, const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector, int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len), void *context); +const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter); int drm_edid_connector_update(struct drm_connector *connector, const struct drm_edid *edid); int drm_edid_connector_add_modes(struct drm_connector *connector); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7adce327c1c2..adff68538484 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -42,7 +42,7 @@ extern struct resource intel_graphics_stolen_res; * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. * This is all handled in the intel-gtt.ko module. i915.ko only - * cares about the vga bit for the vga rbiter. + * cares about the vga bit for the vga arbiter. */ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_VGA_DISABLE (1 << 1) diff --git a/include/drm/i915_hdcp_interface.h b/include/drm/i915_hdcp_interface.h index 2059b066f8a1..4c9c8167c2d5 100644 --- a/include/drm/i915_hdcp_interface.h +++ b/include/drm/i915_hdcp_interface.h @@ -168,12 +168,12 @@ struct i915_hdcp_ops { }; /** - * struct i915_hdcp_master - Used for communication between i915 + * struct i915_hdcp_arbiter - Used for communication between i915 * and hdcp drivers for the HDCP2.2 services * @hdcp_dev: device that provide the HDCP2.2 service from MEI Bus. * @hdcp_ops: Ops implemented by hdcp driver or intel_hdcp_gsc , used by i915 driver. */ -struct i915_hdcp_master { +struct i915_hdcp_arbiter { struct device *hdcp_dev; const struct i915_hdcp_ops *ops; diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index de703c6be969..8db7fd3f743e 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -658,6 +658,49 @@ extern "C" { #define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12) /* + * Intel Color Control Surfaces (CCS) for display ver. 14 render compression. + * + * The main surface is tile4 and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * tile4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13) + +/* + * Intel Color Control Surfaces (CCS) for display ver. 14 media compression + * + * The main surface is tile4 and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * tile4 widths. For semi-planar formats like NV12, CCS planes follow the + * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces, + * planes 2 and 3 for the respective CCS. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14) + +/* + * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render + * compression. 
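As an aside, illustration only: the size relationship implied by the two CCS descriptions above, assuming the usual 4096-byte tile4 (128 bytes wide, 32 rows). One 64-byte CCS cache line covering 4x1 tile4 tiles means 16384 bytes of main surface per 64 bytes of CCS, i.e. a 256:1 ratio, consistent with 4 bits of CCS mapping to a pair of 64-byte main surface cache lines. The macro and helper names below are made up.

#define EXAMPLE_MTL_CCS_RATIO   256     /* bytes of main surface per byte of CCS */

static unsigned int example_mtl_ccs_plane_size(unsigned int main_plane_size)
{
        return DIV_ROUND_UP(main_plane_size, EXAMPLE_MTL_CCS_RATIO);
}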
+ * + * The main surface is tile4 and is at plane index 0 whereas CCS is linear + * and at index 1. The clear color is stored at index 2, and the pitch should + * be ignored. The clear color structure is 256 bits. The first 128 bits + * represent Raw Clear Color Red, Green, Blue and Alpha, each represented + * by 32 bits. The raw clear color is consumed by the 3d engine and generates + * the converted clear color of size 64 bits. The first 32 bits store the Lower + * Converted Clear Color value and the next 32 bits store the Higher Converted + * Clear Color value when applicable. The Converted Clear Color values are + * consumed by the DE. The last 64 bits are used to store Color Discard Enable + * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line + * corresponds to an area of 4x1 tiles in the main surface. The main surface + * pitch is required to be a multiple of 4 tile widths. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15) + +/* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * * Macroblocks are laid in a Z-shape, and each pixel data is following the