| author | Sean Paul | 2019-03-06 09:22:18 -0500 |
|---|---|---|
| committer | Sean Paul | 2019-03-06 09:22:18 -0500 |
| commit | cd7d3a1bb42e0756b17ccfafbd956ca7ed757846 (patch) | |
| tree | fb76fd0ccaaaa3f27ef0a06ecf3d04ebe9d4025d /drivers/gpu/drm/msm | |
| parent | 6b5c029df5e40704a6642e026e709dd0dec44622 (diff) | |
| parent | 4b057e73f28f1df13b77b77a52094238ffdf8abd (diff) |
Merge drm/drm-next into drm-misc-next
Picking up v5.0 + missed misc-fixes from last release
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'drivers/gpu/drm/msm')
25 files changed, 286 insertions, 869 deletions
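The largest msm change pulled in below moves the dpu encoder's idle power-collapse timer from a kthread_delayed_work queued on the per-CRTC display thread to an ordinary delayed_work on a workqueue (see the dpu_encoder.c hunks). As a rough sketch of the plain delayed_work pattern in isolation — not the driver's actual code; the demo_* names and the 1000 ms timeout are made up for illustration:

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

/* Hypothetical encoder-like object using the plain delayed_work API. */
struct demo_encoder {
	struct delayed_work delayed_off_work;
	unsigned int idle_timeout_ms;
};

static void demo_off_work(struct work_struct *work)
{
	/* delayed_work embeds a work_struct in its .work member */
	struct demo_encoder *enc =
		container_of(work, struct demo_encoder, delayed_off_work.work);

	/* ... power down hardware after the idle timeout ... */
	(void)enc;
}

static void demo_encoder_init(struct demo_encoder *enc)
{
	INIT_DELAYED_WORK(&enc->delayed_off_work, demo_off_work);
	enc->idle_timeout_ms = 1000;
}

static void demo_encoder_kickoff(struct demo_encoder *enc)
{
	/* cancel a pending idle-off timer before new activity ... */
	if (cancel_delayed_work_sync(&enc->delayed_off_work))
		pr_debug("idle-off work cancelled\n");

	/* ... then re-arm it on the shared system workqueue */
	schedule_delayed_work(&enc->delayed_off_work,
			      msecs_to_jiffies(enc->idle_timeout_ms));
}
```

queue_delayed_work() on a driver-private workqueue, as the diff does with priv->wq, works the same way; schedule_delayed_work() simply targets the shared system workqueue.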
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d130825e2c75..b776fca571f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -465,8 +465,6 @@ static void _dpu_crtc_setup_mixer_for_encoder(
 			return;
 		}
 
-		mixer->encoder = enc;
-
 		cstate->num_mixers++;
 		DPU_DEBUG("setup mixer %d: lm %d\n",
 				i, mixer->hw_lm->idx - LM_0);
@@ -718,11 +716,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
 	 * may delay and flush at an irq event (e.g. ppdone)
 	 */
 	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		struct dpu_encoder_kickoff_params params = { 0 };
-		dpu_encoder_prepare_for_kickoff(encoder, &params, async);
-	}
-
+			crtc->state->encoder_mask)
+		dpu_encoder_prepare_for_kickoff(encoder, async);
 
 	if (!async) {
 		/* wait for frame_event_done completion */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index dbfb38a1986c..e59d62be4980 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -84,14 +84,12 @@ struct dpu_crtc_smmu_state_data {
  * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
  * @hw_lm:	LM HW Driver context
  * @lm_ctl:	CTL Path HW driver context
- * @encoder:	Encoder attached to this lm & ctl
  * @mixer_op_mode:	mixer blending operation mode
  * @flush_mask:	mixer flush mask for ctl, mixer and pipe
  */
 struct dpu_crtc_mixer {
 	struct dpu_hw_mixer *hw_lm;
 	struct dpu_hw_ctl *lm_ctl;
-	struct drm_encoder *encoder;
 	u32 mixer_op_mode;
 	u32 flush_mask;
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 36af231bb73f..5aa3307f3f0c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -205,7 +205,7 @@ struct dpu_encoder_virt {
 	bool idle_pc_supported;
 	struct mutex rc_lock;
 	enum dpu_enc_rc_states rc_state;
-	struct kthread_delayed_work delayed_off_work;
+	struct delayed_work delayed_off_work;
 	struct kthread_work vsync_event_work;
 	struct msm_display_topology topology;
 	bool mode_set_complete;
@@ -742,7 +742,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 {
 	struct dpu_encoder_virt *dpu_enc;
 	struct msm_drm_private *priv;
-	struct msm_drm_thread *disp_thread;
 	bool is_vid_mode = false;
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
@@ -755,12 +754,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 	is_vid_mode = dpu_enc->disp_info.capabilities &
 						MSM_DISPLAY_CAP_VID_MODE;
 
-	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
-		DPU_ERROR("invalid crtc index\n");
-		return -EINVAL;
-	}
-	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
-
 	/*
 	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
 	 * events and return early for other events (ie wb display).
@@ -777,8 +770,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, switch (sw_event) { case DPU_ENC_RC_EVENT_KICKOFF: /* cancel delayed off work, if any */ - if (kthread_cancel_delayed_work_sync( - &dpu_enc->delayed_off_work)) + if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", sw_event); @@ -837,10 +829,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, return 0; } - kthread_queue_delayed_work( - &disp_thread->worker, - &dpu_enc->delayed_off_work, - msecs_to_jiffies(dpu_enc->idle_timeout)); + queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work, + msecs_to_jiffies(dpu_enc->idle_timeout)); trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported, dpu_enc->rc_state, @@ -849,8 +839,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, case DPU_ENC_RC_EVENT_PRE_STOP: /* cancel delayed off work, if any */ - if (kthread_cancel_delayed_work_sync( - &dpu_enc->delayed_off_work)) + if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", sw_event); @@ -1368,7 +1357,7 @@ static void dpu_encoder_frame_done_callback( } } -static void dpu_encoder_off_work(struct kthread_work *work) +static void dpu_encoder_off_work(struct work_struct *work) { struct dpu_encoder_virt *dpu_enc = container_of(work, struct dpu_encoder_virt, delayed_off_work.work); @@ -1756,15 +1745,14 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work) nsecs_to_jiffies(ktime_to_ns(wakeup_time))); } -void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, - struct dpu_encoder_kickoff_params *params, bool async) +void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async) { struct dpu_encoder_virt *dpu_enc; struct dpu_encoder_phys *phys; bool needs_hw_reset = false; unsigned int i; - if (!drm_enc || !params) { + if (!drm_enc) { DPU_ERROR("invalid args\n"); return; } @@ -1778,7 +1766,7 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, phys = dpu_enc->phys_encs[i]; if (phys) { if (phys->ops.prepare_for_kickoff) - phys->ops.prepare_for_kickoff(phys, params); + phys->ops.prepare_for_kickoff(phys); if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) needs_hw_reset = true; } @@ -2193,7 +2181,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, mutex_init(&dpu_enc->rc_lock); - kthread_init_delayed_work(&dpu_enc->delayed_off_work, + INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work); dpu_enc->idle_timeout = IDLE_TIMEOUT; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index 3f5dafe00580..d77f74fb26d4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -38,15 +38,6 @@ struct dpu_encoder_hw_resources { }; /** - * dpu_encoder_kickoff_params - info encoder requires at kickoff - * @affected_displays: bitmask, bit set means the ROI of the commit lies within - * the bounds of the physical display at the bit index - */ -struct dpu_encoder_kickoff_params { - unsigned long affected_displays; -}; - -/** * dpu_encoder_get_hw_resources - Populate table of required hardware resources * @encoder: encoder pointer * @hw_res: resource table to populate with encoder required resources @@ -88,11 +79,9 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder, * Immediately: if no previous commit is outstanding. 
* Delayed: Block until next trigger can be issued. * @encoder: encoder pointer - * @params: kickoff time parameters * @async: true if this is an asynchronous commit */ -void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, - struct dpu_encoder_kickoff_params *params, bool async); +void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, bool async); /** * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index 44e6f8b68e70..db94f3d3bea3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -144,8 +144,7 @@ struct dpu_encoder_phys_ops { int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc); - void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc, - struct dpu_encoder_kickoff_params *params); + void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc); void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); void (*trigger_start)(struct dpu_encoder_phys *phys_enc); bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 99ab5ca9bed3..a399e1edd313 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -594,8 +594,7 @@ static void dpu_encoder_phys_cmd_get_hw_resources( } static void dpu_encoder_phys_cmd_prepare_for_kickoff( - struct dpu_encoder_phys *phys_enc, - struct dpu_encoder_kickoff_params *params) + struct dpu_encoder_phys *phys_enc) { struct dpu_encoder_phys_cmd *cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); @@ -693,7 +692,7 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done( /* required for both controllers */ if (!rc && cmd_enc->serialize_wait4pp) - dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL); + dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc); return rc; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index acdab5b0db18..3c4eb470a82c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -587,14 +587,13 @@ static int dpu_encoder_phys_vid_wait_for_vblank( } static void dpu_encoder_phys_vid_prepare_for_kickoff( - struct dpu_encoder_phys *phys_enc, - struct dpu_encoder_kickoff_params *params) + struct dpu_encoder_phys *phys_enc) { struct dpu_encoder_phys_vid *vid_enc; struct dpu_hw_ctl *ctl; int rc; - if (!phys_enc || !params) { + if (!phys_enc) { DPU_ERROR("invalid encoder/parameters\n"); return; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 0874f0a53bf9..f59fe1a9f4b9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -263,13 +263,13 @@ static const struct dpu_format dpu_format_map[] = { INTERLEAVED_RGB_FMT(RGB565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(BGR565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), @@ 
-1137,36 +1137,3 @@ const struct msm_format *dpu_get_msm_format( return &fmt->base; return NULL; } - -uint32_t dpu_populate_formats( - const struct dpu_format_extended *format_list, - uint32_t *pixel_formats, - uint64_t *pixel_modifiers, - uint32_t pixel_formats_max) -{ - uint32_t i, fourcc_format; - - if (!format_list || !pixel_formats) - return 0; - - for (i = 0, fourcc_format = 0; - format_list->fourcc_format && i < pixel_formats_max; - ++format_list) { - /* verify if listed format is in dpu_format_map? */ - - /* optionally return modified formats */ - if (pixel_modifiers) { - /* assume same modifier for all fb planes */ - pixel_formats[i] = format_list->fourcc_format; - pixel_modifiers[i++] = format_list->modifier; - } else { - /* assume base formats grouped together */ - if (fourcc_format != format_list->fourcc_format) { - fourcc_format = format_list->fourcc_format; - pixel_formats[i++] = fourcc_format; - } - } - } - - return i; -} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h index a54451d8d011..c02c81e7a667 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h @@ -41,20 +41,6 @@ const struct msm_format *dpu_get_msm_format( const uint64_t modifiers); /** - * dpu_populate_formats - populate the given array with fourcc codes supported - * @format_list: pointer to list of possible formats - * @pixel_formats: array to populate with fourcc codes - * @pixel_modifiers: array to populate with drm modifiers, can be NULL - * @pixel_formats_max: length of pixel formats array - * Return: number of elements populated - */ -uint32_t dpu_populate_formats( - const struct dpu_format_extended *format_list, - uint32_t *pixel_formats, - uint64_t *pixel_modifiers, - uint32_t pixel_formats_max); - -/** * dpu_format_check_modified_format - validate format and buffers for * dpu non-standard, i.e. 
modified format * @kms: kms driver diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 512ac0834d2b..df6852cc98b9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -151,7 +151,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = { .id = DPU_SSPP_CSC_10BIT, \ .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ .virt_format_list = plane_formats, \ + .virt_num_formats = ARRAY_SIZE(plane_formats), \ } #define _DMA_SBLK(num, sdma_pri) \ @@ -163,7 +165,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = { .src_blk = {.name = STRCAT("sspp_src_", num), \ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ .format_list = plane_formats, \ + .num_formats = ARRAY_SIZE(plane_formats), \ .virt_format_list = plane_formats, \ + .virt_num_formats = ARRAY_SIZE(plane_formats), \ } static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 144358a3d0fb..a55653b2e466 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -252,17 +252,6 @@ struct dpu_pp_blk { }; /** - * struct dpu_format_extended - define dpu specific pixel format+modifier - * @fourcc_format: Base FOURCC pixel format code - * @modifier: 64-bit drm format modifier, same modifier must be applied to all - * framebuffer planes - */ -struct dpu_format_extended { - uint32_t fourcc_format; - uint64_t modifier; -}; - -/** * enum dpu_qos_lut_usage - define QoS LUT use cases */ enum dpu_qos_lut_usage { @@ -348,7 +337,9 @@ struct dpu_sspp_blks_common { * @pcc_blk: * @igc_blk: * @format_list: Pointer to list of supported formats + * @num_formats: Number of supported formats * @virt_format_list: Pointer to list of supported formats for virtual planes + * @virt_num_formats: Number of supported formats for virtual planes */ struct dpu_sspp_sub_blks { const struct dpu_sspp_blks_common *common; @@ -366,8 +357,10 @@ struct dpu_sspp_sub_blks { struct dpu_pp_blk pcc_blk; struct dpu_pp_blk igc_blk; - const struct dpu_format_extended *format_list; - const struct dpu_format_extended *virt_format_list; + const u32 *format_list; + u32 num_formats; + const u32 *virt_format_list; + u32 virt_num_formats; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h index 3c9f028628ef..d09730985951 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h @@ -12,157 +12,81 @@ #include "dpu_hw_mdss.h" -static const struct dpu_format_extended plane_formats[] = { - {DRM_FORMAT_ARGB8888, 0}, - {DRM_FORMAT_ABGR8888, 0}, - {DRM_FORMAT_RGBA8888, 0}, - {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_BGRA8888, 0}, - {DRM_FORMAT_XRGB8888, 0}, - {DRM_FORMAT_RGBX8888, 0}, - {DRM_FORMAT_BGRX8888, 0}, - {DRM_FORMAT_XBGR8888, 0}, - {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_RGB888, 0}, - {DRM_FORMAT_BGR888, 0}, - {DRM_FORMAT_RGB565, 0}, - {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_BGR565, 0}, - {DRM_FORMAT_ARGB1555, 0}, - {DRM_FORMAT_ABGR1555, 0}, - {DRM_FORMAT_RGBA5551, 0}, - {DRM_FORMAT_BGRA5551, 0}, - {DRM_FORMAT_XRGB1555, 0}, - {DRM_FORMAT_XBGR1555, 0}, - 
{DRM_FORMAT_RGBX5551, 0}, - {DRM_FORMAT_BGRX5551, 0}, - {DRM_FORMAT_ARGB4444, 0}, - {DRM_FORMAT_ABGR4444, 0}, - {DRM_FORMAT_RGBA4444, 0}, - {DRM_FORMAT_BGRA4444, 0}, - {DRM_FORMAT_XRGB4444, 0}, - {DRM_FORMAT_XBGR4444, 0}, - {DRM_FORMAT_RGBX4444, 0}, - {DRM_FORMAT_BGRX4444, 0}, - {0, 0}, +static const uint32_t qcom_compressed_supported_formats[] = { + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_BGR565, }; -static const struct dpu_format_extended plane_formats_yuv[] = { - {DRM_FORMAT_ARGB8888, 0}, - {DRM_FORMAT_ABGR8888, 0}, - {DRM_FORMAT_RGBA8888, 0}, - {DRM_FORMAT_BGRX8888, 0}, - {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_BGRA8888, 0}, - {DRM_FORMAT_XRGB8888, 0}, - {DRM_FORMAT_XBGR8888, 0}, - {DRM_FORMAT_RGBX8888, 0}, - {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_RGB888, 0}, - {DRM_FORMAT_BGR888, 0}, - {DRM_FORMAT_RGB565, 0}, - {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_BGR565, 0}, - {DRM_FORMAT_ARGB1555, 0}, - {DRM_FORMAT_ABGR1555, 0}, - {DRM_FORMAT_RGBA5551, 0}, - {DRM_FORMAT_BGRA5551, 0}, - {DRM_FORMAT_XRGB1555, 0}, - {DRM_FORMAT_XBGR1555, 0}, - {DRM_FORMAT_RGBX5551, 0}, - {DRM_FORMAT_BGRX5551, 0}, - {DRM_FORMAT_ARGB4444, 0}, - {DRM_FORMAT_ABGR4444, 0}, - {DRM_FORMAT_RGBA4444, 0}, - {DRM_FORMAT_BGRA4444, 0}, - {DRM_FORMAT_XRGB4444, 0}, - {DRM_FORMAT_XBGR4444, 0}, - {DRM_FORMAT_RGBX4444, 0}, - {DRM_FORMAT_BGRX4444, 0}, - - {DRM_FORMAT_NV12, 0}, - {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_NV21, 0}, - {DRM_FORMAT_NV16, 0}, - {DRM_FORMAT_NV61, 0}, - {DRM_FORMAT_VYUY, 0}, - {DRM_FORMAT_UYVY, 0}, - {DRM_FORMAT_YUYV, 0}, - {DRM_FORMAT_YVYU, 0}, - {DRM_FORMAT_YUV420, 0}, - {DRM_FORMAT_YVU420, 0}, - {0, 0}, -}; - -static const struct dpu_format_extended cursor_formats[] = { - {DRM_FORMAT_ARGB8888, 0}, - {DRM_FORMAT_ABGR8888, 0}, - {DRM_FORMAT_RGBA8888, 0}, - {DRM_FORMAT_BGRA8888, 0}, - {DRM_FORMAT_XRGB8888, 0}, - {DRM_FORMAT_ARGB1555, 0}, - {DRM_FORMAT_ABGR1555, 0}, - {DRM_FORMAT_RGBA5551, 0}, - {DRM_FORMAT_BGRA5551, 0}, - {DRM_FORMAT_ARGB4444, 0}, - {DRM_FORMAT_ABGR4444, 0}, - {DRM_FORMAT_RGBA4444, 0}, - {DRM_FORMAT_BGRA4444, 0}, - {0, 0}, +static const uint32_t plane_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, }; -static const struct dpu_format_extended wb2_formats[] = { - {DRM_FORMAT_RGB565, 0}, - {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_RGB888, 0}, - {DRM_FORMAT_ARGB8888, 0}, - {DRM_FORMAT_RGBA8888, 0}, - {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_XRGB8888, 0}, - {DRM_FORMAT_RGBX8888, 0}, - {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_ARGB1555, 0}, - {DRM_FORMAT_RGBA5551, 0}, - {DRM_FORMAT_XRGB1555, 0}, - {DRM_FORMAT_RGBX5551, 0}, - {DRM_FORMAT_ARGB4444, 0}, - {DRM_FORMAT_RGBA4444, 0}, - {DRM_FORMAT_RGBX4444, 0}, - {DRM_FORMAT_XRGB4444, 0}, - - {DRM_FORMAT_BGR565, 0}, - {DRM_FORMAT_BGR888, 0}, - 
{DRM_FORMAT_ABGR8888, 0}, - {DRM_FORMAT_BGRA8888, 0}, - {DRM_FORMAT_BGRX8888, 0}, - {DRM_FORMAT_XBGR8888, 0}, - {DRM_FORMAT_ABGR1555, 0}, - {DRM_FORMAT_BGRA5551, 0}, - {DRM_FORMAT_XBGR1555, 0}, - {DRM_FORMAT_BGRX5551, 0}, - {DRM_FORMAT_ABGR4444, 0}, - {DRM_FORMAT_BGRA4444, 0}, - {DRM_FORMAT_BGRX4444, 0}, - {DRM_FORMAT_XBGR4444, 0}, - - {DRM_FORMAT_YUV420, 0}, - {DRM_FORMAT_NV12, 0}, - {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_NV16, 0}, - {DRM_FORMAT_YUYV, 0}, - - {0, 0}, -}; +static const uint32_t plane_formats_yuv[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, -static const struct dpu_format_extended rgb_10bit_formats[] = { - {DRM_FORMAT_BGRA1010102, 0}, - {DRM_FORMAT_BGRX1010102, 0}, - {DRM_FORMAT_RGBA1010102, 0}, - {DRM_FORMAT_RGBX1010102, 0}, - {DRM_FORMAT_ABGR2101010, 0}, - {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_XBGR2101010, 0}, - {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, - {DRM_FORMAT_ARGB2101010, 0}, - {DRM_FORMAT_XRGB2101010, 0}, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_VYUY, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index c0b7f0049365..8a28a03ac6a9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -170,10 +170,6 @@ /** * AD4 interrupt status bit definitions */ -#define DPU_INTR_BRIGHTPR_UPDATED BIT(4) -#define DPU_INTR_DARKENH_UPDATED BIT(3) -#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2) -#define DPU_INTR_STREN_INROI_UPDATED BIT(1) #define DPU_INTR_BACKLIGHT_UPDATED BIT(0) /** * struct dpu_intr_reg - array of DPU register sets @@ -782,18 +778,6 @@ static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type, return -EINVAL; } -static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off, - uint32_t mask) -{ - if (!intr) - return; - - DPU_REG_WRITE(&intr->hw, reg_off, mask); - - /* ensure register writes go through */ - wmb(); -} - static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, void (*cbfunc)(void *, int), void *arg) @@ -1004,18 +988,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr) return 0; } -static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr, - uint32_t *mask) -{ - if (!intr || !mask) - return -EINVAL; - - *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1 - | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP; - - return 0; -} - static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr) { int i; @@ -1065,19 +1037,6 @@ static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr, wmb(); } -static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr, - int irq_idx) -{ - unsigned long irq_flags; - - if (!intr) - return; - - 
spin_lock_irqsave(&intr->irq_lock, irq_flags); - dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx); - spin_unlock_irqrestore(&intr->irq_lock, irq_flags); -} - static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, int irq_idx, bool clear) { @@ -1113,16 +1072,13 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, static void __setup_intr_ops(struct dpu_hw_intr_ops *ops) { - ops->set_mask = dpu_hw_intr_set_mask; ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup; ops->enable_irq = dpu_hw_intr_enable_irq; ops->disable_irq = dpu_hw_intr_disable_irq; ops->dispatch_irqs = dpu_hw_intr_dispatch_irq; ops->clear_all_irqs = dpu_hw_intr_clear_irqs; ops->disable_all_irqs = dpu_hw_intr_disable_irqs; - ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts; ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses; - ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status; ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock; ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index 61e4cba36562..4d7a1c727ce2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -20,13 +20,6 @@ #include "dpu_hw_util.h" #include "dpu_hw_mdss.h" -#define IRQ_SOURCE_MDP BIT(0) -#define IRQ_SOURCE_DSI0 BIT(4) -#define IRQ_SOURCE_DSI1 BIT(5) -#define IRQ_SOURCE_HDMI BIT(8) -#define IRQ_SOURCE_EDP BIT(12) -#define IRQ_SOURCE_MHL BIT(16) - /** * dpu_intr_type - HW Interrupt Type * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done @@ -96,18 +89,6 @@ struct dpu_hw_intr; */ struct dpu_hw_intr_ops { /** - * set_mask - Programs the given interrupt register with the - * given interrupt mask. Register value will get overwritten. - * @intr: HW interrupt handle - * @reg_off: MDSS HW register offset - * @irqmask: IRQ mask value - */ - void (*set_mask)( - struct dpu_hw_intr *intr, - uint32_t reg, - uint32_t irqmask); - - /** * irq_idx_lookup - Lookup IRQ index on the HW interrupt type * Used for all irq related ops * @intr_type: Interrupt type defined in dpu_intr_type @@ -177,16 +158,6 @@ struct dpu_hw_intr_ops { struct dpu_hw_intr *intr); /** - * clear_interrupt_status - Clears HW interrupt status based on given - * lookup IRQ index. - * @intr: HW interrupt handle - * @irq_idx: Lookup irq index return from irq_idx_lookup - */ - void (*clear_interrupt_status)( - struct dpu_hw_intr *intr, - int irq_idx); - - /** * clear_intr_status_nolock() - clears the HW interrupts without lock * @intr: HW interrupt handle * @irq_idx: Lookup irq index return from irq_idx_lookup @@ -206,21 +177,6 @@ struct dpu_hw_intr_ops { struct dpu_hw_intr *intr, int irq_idx, bool clear); - - /** - * get_valid_interrupts - Gets a mask of all valid interrupt sources - * within DPU. These are actually status bits - * within interrupt registers that specify the - * source of the interrupt in IRQs. For example, - * valid interrupt sources can be MDP, DSI, - * HDMI etc. 
- * @intr: HW interrupt handle - * @mask: Returning the interrupt source MASK - * @return: 0 for success, otherwise failure - */ - int (*get_valid_interrupts)( - struct dpu_hw_intr *intr, - uint32_t *mask); }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 68c54d2c9677..1ab8d4a889f7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -258,12 +258,6 @@ enum dpu_vbif { VBIF_NRT = VBIF_1 }; -enum dpu_iommu_domain { - DPU_IOMMU_DOMAIN_UNSECURE, - DPU_IOMMU_DOMAIN_SECURE, - DPU_IOMMU_DOMAIN_MAX -}; - /** * DPU HW,Component order color map */ @@ -358,7 +352,6 @@ enum dpu_3d_blend_mode { * @alpha_enable: whether the format has an alpha channel * @num_planes: number of planes (including meta data planes) * @fetch_mode: linear, tiled, or ubwc hw fetch behavior - * @is_yuv: is format a yuv variant * @flag: usage bit flags * @tile_width: format tile width * @tile_height: format tile height diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index 321fc64ddd0e..efe70c508ee0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -18,7 +18,6 @@ #include "dpu_hw_mdss.h" #define REG_MASK(n) ((BIT(n)) - 1) -struct dpu_format_extended; /* * This is the common struct maintained by each sub block diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 4d67b3c96702..885bf88afa3e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -405,35 +405,38 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms, } } -static void _dpu_kms_initialize_dsi(struct drm_device *dev, +static int _dpu_kms_initialize_dsi(struct drm_device *dev, struct msm_drm_private *priv, struct dpu_kms *dpu_kms) { struct drm_encoder *encoder = NULL; - int i, rc; + int i, rc = 0; + + if (!(priv->dsi[0] || priv->dsi[1])) + return rc; /*TODO: Support two independent DSI connectors */ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); - if (IS_ERR_OR_NULL(encoder)) { + if (IS_ERR(encoder)) { DPU_ERROR("encoder init failed for dsi display\n"); - return; + return PTR_ERR(encoder); } priv->encoders[priv->num_encoders++] = encoder; for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { - if (!priv->dsi[i]) { - DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i); - return; - } + if (!priv->dsi[i]) + continue; rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder); if (rc) { DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n", i, rc); - continue; + break; } } + + return rc; } /** @@ -444,16 +447,16 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev, * @dpu_kms: Pointer to dpu kms structure * Returns: Zero on success */ -static void _dpu_kms_setup_displays(struct drm_device *dev, +static int _dpu_kms_setup_displays(struct drm_device *dev, struct msm_drm_private *priv, struct dpu_kms *dpu_kms) { - _dpu_kms_initialize_dsi(dev, priv, dpu_kms); - /** * Extend this function to initialize other * types of displays */ + + return _dpu_kms_initialize_dsi(dev, priv, dpu_kms); } static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) @@ -516,7 +519,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) * Create encoder and query display drivers to create * bridges and connectors */ - _dpu_kms_setup_displays(dev, priv, dpu_kms); + ret = _dpu_kms_setup_displays(dev, priv, dpu_kms); + if (ret) + goto fail; max_crtc_count = 
min(catalog->mixer_count, priv->num_encoders); @@ -627,6 +632,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]); dpu_kms->vbif[VBIF_RT] = NULL; + if (dpu_kms->hw_mdp) + dpu_hw_mdp_destroy(dpu_kms->hw_mdp); + dpu_kms->hw_mdp = NULL; + if (dpu_kms->mmio) devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio); dpu_kms->mmio = NULL; @@ -877,8 +886,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) goto power_error; } - rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio, - dpu_kms->dev); + rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio); if (rc) { DPU_ERROR("rm init failed: %d\n", rc); goto power_error; @@ -886,11 +894,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_kms->rm_init = true; - dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm); - if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) { + dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio, + dpu_kms->catalog); + if (IS_ERR(dpu_kms->hw_mdp)) { rc = PTR_ERR(dpu_kms->hw_mdp); - if (!dpu_kms->hw_mdp) - rc = -EINVAL; DPU_ERROR("failed to get hw_mdp: %d\n", rc); dpu_kms->hw_mdp = NULL; goto power_error; @@ -926,16 +933,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) goto hw_intr_init_err; } - /* - * _dpu_kms_drm_obj_init should create the DRM related objects - * i.e. CRTCs, planes, encoders, connectors and so forth - */ - rc = _dpu_kms_drm_obj_init(dpu_kms); - if (rc) { - DPU_ERROR("modeset init failed: %d\n", rc); - goto drm_obj_init_err; - } - dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; @@ -952,6 +949,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms) */ dev->mode_config.allow_fb_modifiers = true; + /* + * _dpu_kms_drm_obj_init should create the DRM related objects + * i.e. CRTCs, planes, encoders, connectors and so forth + */ + rc = _dpu_kms_drm_obj_init(dpu_kms); + if (rc) { + DPU_ERROR("modeset init failed: %d\n", rc); + goto drm_obj_init_err; + } + dpu_vbif_init_memtypes(dpu_kms); pm_runtime_put_sync(&dpu_kms->pdev->dev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index cb307a2abf06..7316b4ab1b85 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -23,11 +23,14 @@ struct dpu_mdss { struct dpu_irq_controller irq_controller; }; -static irqreturn_t dpu_mdss_irq(int irq, void *arg) +static void dpu_mdss_irq(struct irq_desc *desc) { - struct dpu_mdss *dpu_mdss = arg; + struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); u32 interrupts; + chained_irq_enter(chip, desc); + interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS); while (interrupts) { @@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg) hwirq); if (mapping == 0) { DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq); - return IRQ_NONE; + break; } rc = generic_handle_irq(mapping); if (rc < 0) { DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n", hwirq, mapping, rc); - return IRQ_NONE; + break; } interrupts &= ~(1 << hwirq); } - return IRQ_HANDLED; + chained_irq_exit(chip, desc); } static void dpu_mdss_irq_mask(struct irq_data *irqd) @@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = { .irq_unmask = dpu_mdss_irq_unmask, }; +static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key; + static int dpu_mdss_irqdomain_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { struct dpu_mdss *dpu_mdss = domain->host_data; - int ret; + 
irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key); irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq); - ret = irq_set_chip_data(irq, dpu_mdss); - - return ret; + return irq_set_chip_data(irq, dpu_mdss); } static const struct irq_domain_ops dpu_mdss_irqdomain_ops = { @@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss); struct dss_module_power *mp = &dpu_mdss->mp; + int irq; pm_runtime_suspend(dev->dev); pm_runtime_disable(dev->dev); _dpu_mdss_irq_domain_fini(dpu_mdss); - free_irq(platform_get_irq(pdev, 0), dpu_mdss); + irq = platform_get_irq(pdev, 0); + irq_set_chained_handler_and_data(irq, NULL, NULL); msm_dss_put_clk(mp->clk_config, mp->num_clk); devm_kfree(&pdev->dev, mp->clk_config); @@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev) struct dpu_mdss *dpu_mdss; struct dss_module_power *mp; int ret = 0; + int irq; dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL); if (!dpu_mdss) @@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev) if (ret) goto irq_domain_error; - ret = request_irq(platform_get_irq(pdev, 0), - dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss); - if (ret) { - DPU_ERROR("failed to init irq: %d\n", ret); + irq = platform_get_irq(pdev, 0); + if (irq < 0) goto irq_error; - } + + irq_set_chained_handler_and_data(irq, dpu_mdss_irq, + dpu_mdss); pm_runtime_enable(dev->dev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 6aefcd6db46b..b01183b309b9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -95,8 +95,6 @@ struct dpu_plane { enum dpu_sspp pipe; uint32_t features; /* capabilities from catalog */ - uint32_t nformats; - uint32_t formats[64]; struct dpu_hw_pipe *pipe_hw; struct dpu_hw_pipe_cfg pipe_cfg; @@ -121,6 +119,12 @@ struct dpu_plane { bool debugfs_default_scale; }; +static const uint64_t supported_format_modifiers[] = { + DRM_FORMAT_MOD_QCOM_COMPRESSED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + #define to_dpu_plane(x) container_of(x, struct dpu_plane, base) static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) @@ -1410,6 +1414,23 @@ static void dpu_plane_early_unregister(struct drm_plane *plane) debugfs_remove_recursive(pdpu->debugfs_root); } +static bool dpu_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, uint64_t modifier) +{ + if (modifier == DRM_FORMAT_MOD_LINEAR) + return true; + + if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) { + int i; + for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) { + if (format == qcom_compressed_supported_formats[i]) + return true; + } + } + + return false; +} + static const struct drm_plane_funcs dpu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -1419,6 +1440,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = { .atomic_destroy_state = dpu_plane_destroy_state, .late_register = dpu_plane_late_register, .early_unregister = dpu_plane_early_unregister, + .format_mod_supported = dpu_plane_format_mod_supported, }; static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { @@ -1444,11 +1466,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, unsigned long possible_crtcs, u32 master_plane_id) { struct drm_plane *plane = NULL, *master_plane = NULL; - const struct 
dpu_format_extended *format_list; + const uint32_t *format_list; struct dpu_plane *pdpu; struct msm_drm_private *priv = dev->dev_private; struct dpu_kms *kms = to_dpu_kms(priv->kms); int zpos_max = DPU_ZPOS_MAX; + uint32_t num_formats; int ret = -EINVAL; /* create and zero local structure */ @@ -1491,24 +1514,18 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, goto clean_sspp; } - if (!master_plane_id) - format_list = pdpu->pipe_sblk->format_list; - else + if (pdpu->is_virtual) { format_list = pdpu->pipe_sblk->virt_format_list; - - pdpu->nformats = dpu_populate_formats(format_list, - pdpu->formats, - 0, - ARRAY_SIZE(pdpu->formats)); - - if (!pdpu->nformats) { - DPU_ERROR("[%u]no valid formats for plane\n", pipe); - goto clean_sspp; + num_formats = pdpu->pipe_sblk->virt_num_formats; + } + else { + format_list = pdpu->pipe_sblk->format_list; + num_formats = pdpu->pipe_sblk->num_formats; } ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, - pdpu->formats, pdpu->nformats, - NULL, type, NULL); + format_list, num_formats, + supported_format_modifiers, type, NULL); if (ret) goto clean_sspp; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index 7fed0b627708..0e6063acd041 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -28,23 +28,18 @@ /** * struct dpu_plane_state: Define dpu extension of drm plane state object * @base: base drm plane state object - * @property_state: Local storage for msm_prop properties - * @property_values: cached plane property values * @aspace: pointer to address space for input/output buffers - * @input_fence: dereferenced input fence pointer * @stage: assigned by crtc blender * @multirect_index: index of the rectangle of SSPP * @multirect_mode: parallel or time multiplex multirect mode * @pending: whether the current update is still pending * @scaler3_cfg: configuration data for scaler3 * @pixel_ext: configuration data for pixel extensions - * @scaler_check_state: indicates status of user provided pixel extension data * @cdp_cfg: CDP configuration */ struct dpu_plane_state { struct drm_plane_state base; struct msm_gem_address_space *aspace; - void *input_fence; enum dpu_stage stage; uint32_t multirect_index; uint32_t multirect_mode; @@ -107,12 +102,6 @@ void dpu_plane_restore(struct drm_plane *plane); void dpu_plane_flush(struct drm_plane *plane); /** - * dpu_plane_kickoff - final plane operations before commit kickoff - * @plane: Pointer to drm plane structure - */ -void dpu_plane_kickoff(struct drm_plane *plane); - -/** * dpu_plane_set_error: enable/disable error condition * @plane: pointer to drm_plane structure */ @@ -147,14 +136,6 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane); void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state); /** - * dpu_plane_wait_input_fence - wait for input fence object - * @plane: Pointer to DRM plane object - * @wait_ms: Wait timeout value - * Returns: Zero on success - */ -int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms); - -/** * dpu_plane_color_fill - enables color fill on plane * @plane: Pointer to DRM plane object * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red @@ -164,12 +145,4 @@ int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms); int dpu_plane_color_fill(struct drm_plane *plane, uint32_t color, uint32_t alpha); -/** - * dpu_plane_set_revalidate - sets revalidate flag which 
forces a full - * validation of the plane properties in the next atomic check - * @plane: Pointer to DRM plane object - * @enable: Boolean to set/unset the flag - */ -void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable); - #endif /* _DPU_PLANE_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index bdb117709674..037d9f4187f9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -21,8 +21,8 @@ #include "dpu_encoder.h" #include "dpu_trace.h" -#define RESERVED_BY_OTHER(h, r) \ - ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) +#define RESERVED_BY_OTHER(h, r) \ + ((h)->enc_id && (h)->enc_id != r) /** * struct dpu_rm_requirements - Reservation requirements parameter bundle @@ -34,90 +34,21 @@ struct dpu_rm_requirements { struct dpu_encoder_hw_resources hw_res; }; -/** - * struct dpu_rm_rsvp - Use Case Reservation tagging structure - * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain - * By using as a tag, rather than lists of pointers to HW blocks used - * we can avoid some list management since we don't know how many blocks - * of each type a given use case may require. - * @list: List head for list of all reservations - * @seq: Global RSVP sequence number for debugging, especially for - * differentiating differenct allocations for same encoder. - * @enc_id: Reservations are tracked by Encoder DRM object ID. - * CRTCs may be connected to multiple Encoders. - * An encoder or connector id identifies the display path. - */ -struct dpu_rm_rsvp { - struct list_head list; - uint32_t seq; - uint32_t enc_id; -}; /** * struct dpu_rm_hw_blk - hardware block tracking list member * @list: List head for list of all hardware blocks tracking items - * @rsvp: Pointer to use case reservation if reserved by a client - * @rsvp_nxt: Temporary pointer used during reservation to the incoming - * request. Will be swapped into rsvp if proposal is accepted - * @type: Type of hardware block this structure tracks * @id: Hardware ID number, within it's own space, ie. LM_X - * @catalog: Pointer to the hardware catalog entry for this block + * @enc_id: Encoder id to which this blk is binded * @hw: Pointer to the hardware register access object for this block */ struct dpu_rm_hw_blk { struct list_head list; - struct dpu_rm_rsvp *rsvp; - struct dpu_rm_rsvp *rsvp_nxt; - enum dpu_hw_blk_type type; uint32_t id; + uint32_t enc_id; struct dpu_hw_blk *hw; }; -/** - * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging - */ -enum dpu_rm_dbg_rsvp_stage { - DPU_RM_STAGE_BEGIN, - DPU_RM_STAGE_AFTER_CLEAR, - DPU_RM_STAGE_AFTER_RSVPNEXT, - DPU_RM_STAGE_FINAL -}; - -static void _dpu_rm_print_rsvps( - struct dpu_rm *rm, - enum dpu_rm_dbg_rsvp_stage stage) -{ - struct dpu_rm_rsvp *rsvp; - struct dpu_rm_hw_blk *blk; - enum dpu_hw_blk_type type; - - DPU_DEBUG("%d\n", stage); - - list_for_each_entry(rsvp, &rm->rsvps, list) { - DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq, - rsvp->enc_id); - } - - for (type = 0; type < DPU_HW_BLK_MAX; type++) { - list_for_each_entry(blk, &rm->hw_blks[type], list) { - if (!blk->rsvp && !blk->rsvp_nxt) - continue; - - DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage, - (blk->rsvp) ? blk->rsvp->seq : 0, - (blk->rsvp) ? blk->rsvp->enc_id : 0, - (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0, - (blk->rsvp_nxt) ? 
blk->rsvp_nxt->enc_id : 0, - blk->type, blk->id); - } - } -} - -struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm) -{ - return rm->hw_mdp; -} - void dpu_rm_init_hw_iter( struct dpu_rm_hw_iter *iter, uint32_t enc_id, @@ -148,15 +79,7 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) i->blk = list_prepare_entry(i->blk, blk_list, list); list_for_each_entry_continue(i->blk, blk_list, list) { - struct dpu_rm_rsvp *rsvp = i->blk->rsvp; - - if (i->blk->type != i->type) { - DPU_ERROR("found incorrect block type %d on %d list\n", - i->blk->type, i->type); - return false; - } - - if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) { + if (i->enc_id == i->blk->enc_id) { i->hw = i->blk->hw; DPU_DEBUG("found type %d id %d for enc %d\n", i->type, i->blk->id, i->enc_id); @@ -208,34 +131,18 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw) int dpu_rm_destroy(struct dpu_rm *rm) { - - struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt; struct dpu_rm_hw_blk *hw_cur, *hw_nxt; enum dpu_hw_blk_type type; - if (!rm) { - DPU_ERROR("invalid rm\n"); - return -EINVAL; - } - - list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) { - list_del(&rsvp_cur->list); - kfree(rsvp_cur); - } - - for (type = 0; type < DPU_HW_BLK_MAX; type++) { list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type], list) { list_del(&hw_cur->list); - _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw); + _dpu_rm_hw_destroy(type, hw_cur->hw); kfree(hw_cur); } } - dpu_hw_mdp_destroy(rm->hw_mdp); - rm->hw_mdp = NULL; - mutex_destroy(&rm->rm_lock); return 0; @@ -250,11 +157,8 @@ static int _dpu_rm_hw_blk_create( void *hw_catalog_info) { struct dpu_rm_hw_blk *blk; - struct dpu_hw_mdp *hw_mdp; void *hw; - hw_mdp = rm->hw_mdp; - switch (type) { case DPU_HW_BLK_LM: hw = dpu_hw_lm_init(id, mmio, cat); @@ -290,9 +194,9 @@ static int _dpu_rm_hw_blk_create( return -ENOMEM; } - blk->type = type; blk->id = id; blk->hw = hw; + blk->enc_id = 0; list_add_tail(&blk->list, &rm->hw_blks[type]); return 0; @@ -300,13 +204,12 @@ static int _dpu_rm_hw_blk_create( int dpu_rm_init(struct dpu_rm *rm, struct dpu_mdss_cfg *cat, - void __iomem *mmio, - struct drm_device *dev) + void __iomem *mmio) { int rc, i; enum dpu_hw_blk_type type; - if (!rm || !cat || !mmio || !dev) { + if (!rm || !cat || !mmio) { DPU_ERROR("invalid kms\n"); return -EINVAL; } @@ -316,21 +219,9 @@ int dpu_rm_init(struct dpu_rm *rm, mutex_init(&rm->rm_lock); - INIT_LIST_HEAD(&rm->rsvps); for (type = 0; type < DPU_HW_BLK_MAX; type++) INIT_LIST_HEAD(&rm->hw_blks[type]); - rm->dev = dev; - - /* Some of the sub-blocks require an mdptop to be created */ - rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat); - if (IS_ERR_OR_NULL(rm->hw_mdp)) { - rc = PTR_ERR(rm->hw_mdp); - rm->hw_mdp = NULL; - DPU_ERROR("failed: mdp hw not available\n"); - goto fail; - } - /* Interrogate HW catalog and create tracking items for hw blocks */ for (i = 0; i < cat->mixer_count; i++) { struct dpu_lm_cfg *lm = &cat->mixer[i]; @@ -410,7 +301,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) * proposed use case requirements, incl. 
hardwired dependent blocks like * pingpong * @rm: dpu resource manager handle - * @rsvp: reservation currently being created + * @enc_id: encoder id requesting for allocation * @reqs: proposed use case requirements * @lm: proposed layer mixer, function checks if lm, and all other hardwired * blocks connected to the lm (pp) is available and appropriate @@ -422,7 +313,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) */ static bool _dpu_rm_check_lm_and_get_connected_blks( struct dpu_rm *rm, - struct dpu_rm_rsvp *rsvp, + uint32_t enc_id, struct dpu_rm_requirements *reqs, struct dpu_rm_hw_blk *lm, struct dpu_rm_hw_blk **pp, @@ -449,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks( } /* Already reserved? */ - if (RESERVED_BY_OTHER(lm, rsvp)) { + if (RESERVED_BY_OTHER(lm, enc_id)) { DPU_DEBUG("lm %d already reserved\n", lm_cfg->id); return false; } @@ -467,7 +358,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks( return false; } - if (RESERVED_BY_OTHER(*pp, rsvp)) { + if (RESERVED_BY_OTHER(*pp, enc_id)) { DPU_DEBUG("lm %d pp %d already reserved\n", lm->id, (*pp)->id); return false; @@ -476,10 +367,8 @@ static bool _dpu_rm_check_lm_and_get_connected_blks( return true; } -static int _dpu_rm_reserve_lms( - struct dpu_rm *rm, - struct dpu_rm_rsvp *rsvp, - struct dpu_rm_requirements *reqs) +static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, + struct dpu_rm_requirements *reqs) { struct dpu_rm_hw_blk *lm[MAX_BLOCKS]; @@ -504,7 +393,7 @@ static int _dpu_rm_reserve_lms( lm[lm_count] = iter_i.blk; if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, rsvp, reqs, lm[lm_count], + rm, enc_id, reqs, lm[lm_count], &pp[lm_count], NULL)) continue; @@ -519,7 +408,7 @@ static int _dpu_rm_reserve_lms( continue; if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, rsvp, reqs, iter_j.blk, + rm, enc_id, reqs, iter_j.blk, &pp[lm_count], iter_i.blk)) continue; @@ -537,11 +426,10 @@ static int _dpu_rm_reserve_lms( if (!lm[i]) break; - lm[i]->rsvp_nxt = rsvp; - pp[i]->rsvp_nxt = rsvp; + lm[i]->enc_id = enc_id; + pp[i]->enc_id = enc_id; - trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id, - pp[i]->id); + trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id); } return rc; @@ -549,7 +437,7 @@ static int _dpu_rm_reserve_lms( static int _dpu_rm_reserve_ctls( struct dpu_rm *rm, - struct dpu_rm_rsvp *rsvp, + uint32_t enc_id, const struct msm_display_topology *top) { struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; @@ -570,7 +458,7 @@ static int _dpu_rm_reserve_ctls( unsigned long features = ctl->caps->features; bool has_split_display; - if (RESERVED_BY_OTHER(iter.blk, rsvp)) + if (RESERVED_BY_OTHER(iter.blk, enc_id)) continue; has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; @@ -591,9 +479,8 @@ static int _dpu_rm_reserve_ctls( return -ENAVAIL; for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) { - ctls[i]->rsvp_nxt = rsvp; - trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type, - rsvp->enc_id); + ctls[i]->enc_id = enc_id; + trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id); } return 0; @@ -601,7 +488,7 @@ static int _dpu_rm_reserve_ctls( static int _dpu_rm_reserve_intf( struct dpu_rm *rm, - struct dpu_rm_rsvp *rsvp, + uint32_t enc_id, uint32_t id, enum dpu_hw_blk_type type) { @@ -614,14 +501,13 @@ static int _dpu_rm_reserve_intf( if (iter.blk->id != id) continue; - if (RESERVED_BY_OTHER(iter.blk, rsvp)) { + if (RESERVED_BY_OTHER(iter.blk, enc_id)) { DPU_ERROR("type %d id %d already reserved\n", type, id); return -ENAVAIL; } - iter.blk->rsvp_nxt 
= rsvp; - trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type, - rsvp->enc_id); + iter.blk->enc_id = enc_id; + trace_dpu_rm_reserve_intf(iter.blk->id, enc_id); break; } @@ -636,7 +522,7 @@ static int _dpu_rm_reserve_intf( static int _dpu_rm_reserve_intf_related_hw( struct dpu_rm *rm, - struct dpu_rm_rsvp *rsvp, + uint32_t enc_id, struct dpu_encoder_hw_resources *hw_res) { int i, ret = 0; @@ -646,7 +532,7 @@ static int _dpu_rm_reserve_intf_related_hw( if (hw_res->intfs[i] == INTF_MODE_NONE) continue; id = i + INTF_0; - ret = _dpu_rm_reserve_intf(rm, rsvp, id, + ret = _dpu_rm_reserve_intf(rm, enc_id, id, DPU_HW_BLK_INTF); if (ret) return ret; @@ -655,33 +541,27 @@ static int _dpu_rm_reserve_intf_related_hw( return ret; } -static int _dpu_rm_make_next_rsvp( +static int _dpu_rm_make_reservation( struct dpu_rm *rm, struct drm_encoder *enc, struct drm_crtc_state *crtc_state, - struct dpu_rm_rsvp *rsvp, struct dpu_rm_requirements *reqs) { int ret; - /* Create reservation info, tag reserved blocks with it as we go */ - rsvp->seq = ++rm->rsvp_next_seq; - rsvp->enc_id = enc->base.id; - list_add_tail(&rsvp->list, &rm->rsvps); - - ret = _dpu_rm_reserve_lms(rm, rsvp, reqs); + ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs); if (ret) { DPU_ERROR("unable to find appropriate mixers\n"); return ret; } - ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology); + ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology); if (ret) { DPU_ERROR("unable to find appropriate CTL\n"); return ret; } - ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res); + ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res); if (ret) return ret; @@ -706,108 +586,31 @@ static int _dpu_rm_populate_requirements( return 0; } -static struct dpu_rm_rsvp *_dpu_rm_get_rsvp( - struct dpu_rm *rm, - struct drm_encoder *enc) +static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id) { - struct dpu_rm_rsvp *i; - - if (!rm || !enc) { - DPU_ERROR("invalid params\n"); - return NULL; - } - - if (list_empty(&rm->rsvps)) - return NULL; - - list_for_each_entry(i, &rm->rsvps, list) - if (i->enc_id == enc->base.id) - return i; - - return NULL; -} - -/** - * _dpu_rm_release_rsvp - release resources and release a reservation - * @rm: KMS handle - * @rsvp: RSVP pointer to release and release resources for - */ -static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp) -{ - struct dpu_rm_rsvp *rsvp_c, *rsvp_n; struct dpu_rm_hw_blk *blk; enum dpu_hw_blk_type type; - if (!rsvp) - return; - - DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id); - - list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) { - if (rsvp == rsvp_c) { - list_del(&rsvp_c->list); - break; - } - } - for (type = 0; type < DPU_HW_BLK_MAX; type++) { list_for_each_entry(blk, &rm->hw_blks[type], list) { - if (blk->rsvp == rsvp) { - blk->rsvp = NULL; - DPU_DEBUG("rel rsvp %d enc %d %d %d\n", - rsvp->seq, rsvp->enc_id, - blk->type, blk->id); - } - if (blk->rsvp_nxt == rsvp) { - blk->rsvp_nxt = NULL; - DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n", - rsvp->seq, rsvp->enc_id, - blk->type, blk->id); + if (blk->enc_id == enc_id) { + blk->enc_id = 0; + DPU_DEBUG("rel enc %d %d %d\n", enc_id, + type, blk->id); } } } - - kfree(rsvp); } void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) { - struct dpu_rm_rsvp *rsvp; - - if (!rm || !enc) { - DPU_ERROR("invalid params\n"); - return; - } - mutex_lock(&rm->rm_lock); - rsvp = _dpu_rm_get_rsvp(rm, enc); - if (!rsvp) { - DPU_ERROR("failed to find rsvp for enc %d\n", 
enc->base.id); - goto end; - } + _dpu_rm_release_reservation(rm, enc->base.id); - _dpu_rm_release_rsvp(rm, rsvp); -end: mutex_unlock(&rm->rm_lock); } -static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp) -{ - struct dpu_rm_hw_blk *blk; - enum dpu_hw_blk_type type; - - /* Swap next rsvp to be the active */ - for (type = 0; type < DPU_HW_BLK_MAX; type++) { - list_for_each_entry(blk, &rm->hw_blks[type], list) { - if (blk->rsvp_nxt) { - blk->rsvp = blk->rsvp_nxt; - blk->rsvp_nxt = NULL; - } - } - } -} - int dpu_rm_reserve( struct dpu_rm *rm, struct drm_encoder *enc, @@ -815,7 +618,6 @@ int dpu_rm_reserve( struct msm_display_topology topology, bool test_only) { - struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt; struct dpu_rm_requirements reqs; int ret; @@ -828,8 +630,6 @@ int dpu_rm_reserve( mutex_lock(&rm->rm_lock); - _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN); - ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs, topology); if (ret) { @@ -837,50 +637,17 @@ int dpu_rm_reserve( goto end; } - /* - * We only support one active reservation per-hw-block. But to implement - * transactional semantics for test-only, and for allowing failure while - * modifying your existing reservation, over the course of this - * function we can have two reservations: - * Current: Existing reservation - * Next: Proposed reservation. The proposed reservation may fail, or may - * be discarded if in test-only mode. - * If reservation is successful, and we're not in test-only, then we - * replace the current with the next. - */ - rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL); - if (!rsvp_nxt) { - ret = -ENOMEM; - goto end; - } - - rsvp_cur = _dpu_rm_get_rsvp(rm, enc); - - /* Check the proposed reservation, store it in hw's "next" field */ - ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs); - - _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT); - + ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs); if (ret) { DPU_ERROR("failed to reserve hw resources: %d\n", ret); - _dpu_rm_release_rsvp(rm, rsvp_nxt); + _dpu_rm_release_reservation(rm, enc->base.id); } else if (test_only) { - /* - * Normally, if test_only, test the reservation and then undo - * However, if the user requests LOCK, then keep the reservation - * made during the atomic_check phase. 
- */ - DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n", - rsvp_nxt->seq, rsvp_nxt->enc_id); - _dpu_rm_release_rsvp(rm, rsvp_nxt); - } else { - _dpu_rm_release_rsvp(rm, rsvp_cur); - - _dpu_rm_commit_rsvp(rm, rsvp_nxt); + /* test_only: test the reservation and then undo */ + DPU_DEBUG("test_only: discard test [enc: %d]\n", + enc->base.id); + _dpu_rm_release_reservation(rm, enc->base.id); } - _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL); - end: mutex_unlock(&rm->rm_lock); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index b8273bd23801..381611fc5877 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -22,22 +22,14 @@ /** * struct dpu_rm - DPU dynamic hardware resource manager - * @dev: device handle for event logging purposes - * @rsvps: list of hardware reservations by each crtc->encoder->connector * @hw_blks: array of lists of hardware resources present in the system, one * list per type of hardware block - * @hw_mdp: hardware object for mdp_top * @lm_max_width: cached layer mixer maximum width - * @rsvp_next_seq: sequence number for next reservation for debugging purposes * @rm_lock: resource manager mutex */ struct dpu_rm { - struct drm_device *dev; - struct list_head rsvps; struct list_head hw_blks[DPU_HW_BLK_MAX]; - struct dpu_hw_mdp *hw_mdp; uint32_t lm_max_width; - uint32_t rsvp_next_seq; struct mutex rm_lock; }; @@ -67,13 +59,11 @@ struct dpu_rm_hw_iter { * @rm: DPU Resource Manager handle * @cat: Pointer to hardware catalog * @mmio: mapped register io address of MDP - * @dev: device handle for event logging purposes * @Return: 0 on Success otherwise -ERROR */ int dpu_rm_init(struct dpu_rm *rm, struct dpu_mdss_cfg *cat, - void __iomem *mmio, - struct drm_device *dev); + void __iomem *mmio); /** * dpu_rm_destroy - Free all memory allocated by dpu_rm_init @@ -112,14 +102,6 @@ int dpu_rm_reserve(struct dpu_rm *rm, void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc); /** - * dpu_rm_get_mdp - Retrieve HW block for MDP TOP. - * This is never reserved, and is usable by any display. 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index b8273bd23801..381611fc5877 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -22,22 +22,14 @@
 /**
  * struct dpu_rm - DPU dynamic hardware resource manager
- * @dev: device handle for event logging purposes
- * @rsvps: list of hardware reservations by each crtc->encoder->connector
  * @hw_blks: array of lists of hardware resources present in the system, one
  *	list per type of hardware block
- * @hw_mdp: hardware object for mdp_top
  * @lm_max_width: cached layer mixer maximum width
- * @rsvp_next_seq: sequence number for next reservation for debugging purposes
  * @rm_lock: resource manager mutex
  */
 struct dpu_rm {
-	struct drm_device *dev;
-	struct list_head rsvps;
 	struct list_head hw_blks[DPU_HW_BLK_MAX];
-	struct dpu_hw_mdp *hw_mdp;
 	uint32_t lm_max_width;
-	uint32_t rsvp_next_seq;
 	struct mutex rm_lock;
 };
@@ -67,13 +59,11 @@ struct dpu_rm_hw_iter {
  * @rm: DPU Resource Manager handle
  * @cat: Pointer to hardware catalog
  * @mmio: mapped register io address of MDP
- * @dev: device handle for event logging purposes
  * @Return: 0 on Success otherwise -ERROR
  */
 int dpu_rm_init(struct dpu_rm *rm,
 		struct dpu_mdss_cfg *cat,
-		void __iomem *mmio,
-		struct drm_device *dev);
+		void __iomem *mmio);
 /**
  * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
@@ -112,14 +102,6 @@ int dpu_rm_reserve(struct dpu_rm *rm,
 void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
 /**
- * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
- *	This is never reserved, and is usable by any display.
- * @rm: DPU Resource Manager handle
- * @Return: Pointer to hw block or NULL
- */
-struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
-
-/**
  * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
  *	using dpu_rm_get_hw
  * @iter: iter object to initialize
@@ -144,12 +126,4 @@ void dpu_rm_init_hw_iter(
  * @Return: true on match found, false on no match found
  */
 bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
-
-/**
- * dpu_rm_check_property_topctl - validate property bitmask before it is set
- * @val: user's proposed topology control bitmask
- * @Return: 0 on success or error
- */
-int dpu_rm_check_property_topctl(uint64_t val);
-
 #endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index c78b521ceda1..8bb46090bd16 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -831,48 +831,42 @@ TRACE_EVENT(dpu_plane_disable,
 );
 DECLARE_EVENT_CLASS(dpu_rm_iter_template,
-	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
-	TP_ARGS(id, type, enc_id),
+	TP_PROTO(uint32_t id, uint32_t enc_id),
+	TP_ARGS(id, enc_id),
 	TP_STRUCT__entry(
 		__field(	uint32_t,		id	)
-		__field(	enum dpu_hw_blk_type,	type	)
 		__field(	uint32_t,		enc_id	)
 	),
 	TP_fast_assign(
 		__entry->id = id;
-		__entry->type = type;
 		__entry->enc_id = enc_id;
 	),
-	TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
-		  __entry->enc_id)
+	TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id)
 );
 DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
-	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
-	TP_ARGS(id, type, enc_id)
+	TP_PROTO(uint32_t id, uint32_t enc_id),
+	TP_ARGS(id, enc_id)
 );
 DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
-	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
-	TP_ARGS(id, type, enc_id)
+	TP_PROTO(uint32_t id, uint32_t enc_id),
+	TP_ARGS(id, enc_id)
 );
 TRACE_EVENT(dpu_rm_reserve_lms,
-	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
-		 uint32_t pp_id),
-	TP_ARGS(id, type, enc_id, pp_id),
+	TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id),
+	TP_ARGS(id, enc_id, pp_id),
 	TP_STRUCT__entry(
 		__field(	uint32_t,		id	)
-		__field(	enum dpu_hw_blk_type,	type	)
 		__field(	uint32_t,		enc_id	)
 		__field(	uint32_t,		pp_id	)
 	),
 	TP_fast_assign(
 		__entry->id = id;
-		__entry->type = type;
 		__entry->enc_id = enc_id;
 		__entry->pp_id = pp_id;
 	),
-	TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
-		  __entry->type, __entry->enc_id, __entry->pp_id)
+	TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id,
+		  __entry->enc_id, __entry->pp_id)
 );
 TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
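Since DECLARE_EVENT_CLASS/DEFINE_EVENT generate a trace_<event>() helper per event with exactly the TP_PROTO arguments, call sites lose the block-type argument along with the dropped field. A hypothetical call site (identifiers here are illustrative, not taken from this patch):

	/* before: trace_dpu_rm_reserve_intf(intf->id, DPU_HW_BLK_INTF, enc_id); */
	trace_dpu_rm_reserve_intf(intf->id, enc_id);
	trace_dpu_rm_reserve_lms(lm->id, enc_id, pp->id);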
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0689194b295d..4697d854b827 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -207,62 +207,44 @@ u32 msm_readl(const void __iomem *addr)
 	return val;
 }
-struct vblank_event {
-	struct list_head node;
+struct msm_vblank_work {
+	struct work_struct work;
 	int crtc_id;
 	bool enable;
+	struct msm_drm_private *priv;
 };
-static void vblank_ctrl_worker(struct kthread_work *work)
+static void vblank_ctrl_worker(struct work_struct *work)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
-			struct msm_vblank_ctrl, work);
-	struct msm_drm_private *priv = container_of(vbl_ctrl,
-			struct msm_drm_private, vblank_ctrl);
+	struct msm_vblank_work *vbl_work = container_of(work,
+			struct msm_vblank_work, work);
+	struct msm_drm_private *priv = vbl_work->priv;
 	struct msm_kms *kms = priv->kms;
-	struct vblank_event *vbl_ev, *tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
-		if (vbl_ev->enable)
-			kms->funcs->enable_vblank(kms,
-					priv->crtcs[vbl_ev->crtc_id]);
-		else
-			kms->funcs->disable_vblank(kms,
-					priv->crtcs[vbl_ev->crtc_id]);
-
-		kfree(vbl_ev);
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	}
+	if (vbl_work->enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	kfree(vbl_work);
 }
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv, int crtc_id, bool enable)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
-	unsigned long flags;
+	struct msm_vblank_work *vbl_work;
-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
+	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+	if (!vbl_work)
 		return -ENOMEM;
-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	vbl_work->crtc_id = crtc_id;
+	vbl_work->enable = enable;
+	vbl_work->priv = priv;
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-			&vbl_ctrl->work);
+	queue_work(priv->wq, &vbl_work->work);
 	return 0;
 }
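The replacement for the per-CRTC kthread plus locked event list is the stock fire-and-forget work item: allocate the work atomically, stash the parameters in it, queue it on the driver's workqueue, and let the handler free it. Condensed from the two hunks above (same names; the handler side is vblank_ctrl_worker() as shown):

	struct msm_vblank_work *vbl_work;

	/* may be called from a context that cannot sleep, hence GFP_ATOMIC */
	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	/* priv->wq is an ordered workqueue, so enable/disable run in submission order */
	queue_work(priv->wq, &vbl_work->work);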
@@ -274,31 +256,20 @@ static int msm_drm_uninit(struct device *dev)
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_mdss *mdss = priv->mdss;
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev, *tmp;
 	int i;
 	/* We must cancel and cleanup any pending vblank enable/disable
 	 * work before drm_irq_uninstall() to avoid work re-enabling an
 	 * irq after uninstall has disabled it.
 	 */
-	kthread_flush_work(&vbl_ctrl->work);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		kfree(vbl_ev);
-	}
-	/* clean up display commit/event worker threads */
-	for (i = 0; i < priv->num_crtcs; i++) {
-		if (priv->disp_thread[i].thread) {
-			kthread_flush_worker(&priv->disp_thread[i].worker);
-			kthread_stop(priv->disp_thread[i].thread);
-			priv->disp_thread[i].thread = NULL;
-		}
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
+	/* clean up event worker threads */
+	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->event_thread[i].thread) {
-			kthread_flush_worker(&priv->event_thread[i].worker);
-			kthread_stop(priv->event_thread[i].thread);
+			kthread_destroy_worker(&priv->event_thread[i].worker);
 			priv->event_thread[i].thread = NULL;
 		}
 	}
@@ -323,9 +294,6 @@ static int msm_drm_uninit(struct device *dev)
 	drm_irq_uninstall(ddev);
 	pm_runtime_put_sync(dev);
-	flush_workqueue(priv->wq);
-	destroy_workqueue(priv->wq);
-
 	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
@@ -490,9 +458,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
 	drm_mode_config_init(ddev);
@@ -554,27 +519,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	 */
 	param.sched_priority = 16;
 	for (i = 0; i < priv->num_crtcs; i++) {
-
-		/* initialize display thread */
-		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
-		kthread_init_worker(&priv->disp_thread[i].worker);
-		priv->disp_thread[i].dev = ddev;
-		priv->disp_thread[i].thread =
-			kthread_run(kthread_worker_fn,
-				&priv->disp_thread[i].worker,
-				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
-		if (IS_ERR(priv->disp_thread[i].thread)) {
-			DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
-			priv->disp_thread[i].thread = NULL;
-			goto err_msm_uninit;
-		}
-
-		ret = sched_setscheduler(priv->disp_thread[i].thread,
-					 SCHED_FIFO, &param);
-		if (ret)
-			dev_warn(dev, "disp_thread set priority failed: %d\n",
-				ret);
-
 		/* initialize event thread */
 		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
 		kthread_init_worker(&priv->event_thread[i].worker);
@@ -589,13 +533,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 			goto err_msm_uninit;
 		}
-		/**
-		 * event thread should also run at same priority as disp_thread
-		 * because it is handling frame_done events. A lower priority
-		 * event thread and higher priority disp_thread can causes
-		 * frame_pending counters beyond 2. This can lead to commit
-		 * failure at crtc commit level.
-		 */
 		ret = sched_setscheduler(priv->event_thread[i].thread,
 					 SCHED_FIFO, &param);
 		if (ret)
@@ -914,8 +851,12 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 			ret = -EINVAL;
 			break;
 		}
-		ret = copy_from_user(msm_obj->name,
-			u64_to_user_ptr(args->value), args->len);
+		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
+				   args->len)) {
+			msm_obj->name[0] = '\0';
+			ret = -EFAULT;
+			break;
+		}
 		msm_obj->name[args->len] = '\0';
 		for (i = 0; i < args->len; i++) {
 			if (!isprint(msm_obj->name[i])) {
@@ -931,8 +872,9 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 		}
 		args->len = strlen(msm_obj->name);
 		if (args->value) {
-			ret = copy_to_user(u64_to_user_ptr(args->value),
-					msm_obj->name, args->len);
+			if (copy_to_user(u64_to_user_ptr(args->value),
+					 msm_obj->name, args->len))
+				ret = -EFAULT;
 		}
 		break;
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a3e7c95e186e..163e24d2ab99 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
 	PLANE_PROP_MAX_NUM
 };
-struct msm_vblank_ctrl {
-	struct kthread_work work;
-	struct list_head event_list;
-	spinlock_t lock;
-};
-
 #define MSM_GPU_MAX_RINGS 4
 #define MAX_H_TILES_PER_DISPLAY 2
@@ -126,7 +120,7 @@ struct msm_display_topology {
 /**
  * struct msm_display_info - defines display properties
- * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @intf_type: DRM_MODE_ENCODER_ type
  * @capabilities: Bitmask of display flags
  * @num_of_h_tiles: Number of horizontal tiles in case of split interface
  * @h_tile_instance: Controller instance used per tile. Number of elements is
@@ -199,7 +193,6 @@ struct msm_drm_private {
 	unsigned int num_crtcs;
 	struct drm_crtc *crtcs[MAX_CRTCS];
-	struct msm_drm_thread disp_thread[MAX_CRTCS];
 	struct msm_drm_thread event_thread[MAX_CRTCS];
 	unsigned int num_encoders;
@@ -228,7 +221,6 @@ struct msm_drm_private {
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
-	struct msm_vblank_ctrl vblank_ctrl;
 	struct drm_atomic_state *pm_state;
 };
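One behavioural fix above deserves a note: copy_from_user() and copy_to_user() return the number of bytes left uncopied, not a negative errno, so assigning the return value to ret (as msm_ioctl_gem_info did before) could hand a positive byte count back to userspace. The conventional pattern is to translate any non-zero return into -EFAULT, as the new code does. A generic sketch (buf, uptr and len are placeholders, not names from this driver):

	if (copy_from_user(buf, u64_to_user_ptr(uptr), len))
		return -EFAULT;	/* non-zero means some bytes were not copied */

	if (copy_to_user(u64_to_user_ptr(uptr), buf, len))
		return -EFAULT;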