author     Linus Torvalds   2019-09-27 11:13:35 -0700
committer  Linus Torvalds   2019-09-27 11:13:35 -0700
commit     289991ce1cac18e7cd489902986ef986baa49568 (patch)
tree       b04c83b7d931c91fd71d68d9e5c6d572b324a320 /drivers/gpu
parent     0cd81d77d0569f1dc1e39abeea93c6184e9b5b54 (diff)
parent     3e2cb6d89325dc36a03937a2b82fc7eb902c96b0 (diff)
Merge tag 'drm-next-2019-09-27' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
"Fixes built up over the past 1.5 weeks or so, it's two weeks of
amdgpu, some core cleanups and some panfrost fixes. I also finally
figured out why my desktop was slow to do a bunch of stuff (someone
gave it an IPv6 address which can't reach anything!).
core:
- Some cleanups and fixes in the self-refresh helpers (see the sketch after this message)
- Some cleanups and fixes in the atomic helpers
amdgpu:
- Fix a 64 bit divide
- Prevent a memory leak in a failure case in dc
- Load proper gfx firmware on navi14 variants
- Add more navi12 and navi14 PCI ids
- Misc fixes for renoir
- Fix bandwidth issues with multiple displays on vega20
- Support for Dali
- Fix a possible oops with KFD on hawaii
- Fix for backlight level after resume on some APUs
- Other misc fixes
panfrost:
- Multiple panfrost fixes for regulator support and page fault
handling"
* tag 'drm-next-2019-09-27' of git://anongit.freedesktop.org/drm/drm: (34 commits)
drm/amd/display: prevent memory leak
drm/amdgpu/gfx10: add support for wks firmware loading
drm/amdgpu/display: include slab.h in dcn21_resource.c
drm/amdgpu/display: fix 64 bit divide
drm/panfrost: Prevent race when handling page fault
drm/panfrost: Remove NULL checks for regulator
drm/panfrost: Fix regulator_get_optional() misuse
drm: Measure Self Refresh Entry/Exit times to avoid thrashing
drm: Fix kerneldoc and remove unused struct member in self_refresh helper
drm/atomic: Rename crtc_state->pageflip_flags to async_flip
drm/atomic: Reject FLIP_ASYNC unconditionally
drm/atomic: Take the atomic toys away from X
drm/amdgpu: flag navi12 and 14 as experimental for 5.4
drm/kms: Duct-tape for mode object lifetime checks
drm/amdgpu: add navi12 pci id
drm/amdgpu: add navi14 PCI ID for work station SKU
drm/amdkfd: Swap trap temporary registers in gfx10 trap handler
drm/amd/powerplay: implement sysfs for getting dpm clock
drm/amd/display: Restore backlight brightness after system resume
drm/amd/display: Implement voltage limitation for dali
...
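
Two entries in the list above touch 64-bit division ("drm/amdgpu/display: fix
64 bit divide", and the vega20 bandwidth change whose hunk below uses
div64_s64()). The constraint behind them: on 32-bit kernels a plain '/' with a
64-bit dividend makes the compiler emit a call to a libgcc helper such as
__udivdi3, which the kernel does not provide, so the helpers from
<linux/math64.h> must be used instead. A small illustrative sketch follows;
the function name and numbers are made up for illustration, not the patched
code:

#include <linux/math64.h>
#include <linux/types.h>

/* Hypothetical example: scale a 64-bit clock value (in 10 kHz units) down to
 * kHz. Writing "yclk / (mult * 10000)" would build fine on x86-64 but fail to
 * link on 32-bit targets with "undefined reference to __udivdi3". */
static u32 example_min_mem_clock_khz(u64 yclk, u32 mult)
{
	/* div_u64(): 64-bit dividend, 32-bit divisor, safe on every arch. */
	return (u32)div_u64(yclk, mult * 10000);
}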
Diffstat (limited to 'drivers/gpu')
44 files changed, 436 insertions, 138 deletions
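
Before the diff itself, one pattern worth calling out: the amdgpu_kms.c hunk
below adds "if (info->read_mmr_reg.count > 128) return -EINVAL;" ahead of
kmalloc_array(), i.e. a userspace-supplied element count is bounded before it
sizes a kernel allocation. A hedged, self-contained sketch of the same pattern
(the helper name and cap here are illustrative, not the driver's code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define EXAMPLE_MAX_REGS 128	/* illustrative cap, mirrors the 128 used below */

/* Hypothetical helper: read 'count' u32 registers and copy them to userspace. */
static int example_read_regs(u32 count, void __user *out)
{
	u32 *regs;
	int ret = 0;

	/* Reject absurd counts before they reach the allocator: an unchecked
	 * value lets userspace trigger huge allocations (or a zero-size one). */
	if (count == 0 || count > EXAMPLE_MAX_REGS)
		return -EINVAL;

	regs = kmalloc_array(count, sizeof(*regs), GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	/* ... fill regs[] from hardware here ... */

	if (copy_to_user(out, regs, count * sizeof(*regs)))
		ret = -EFAULT;

	kfree(regs);
	return ret;
}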
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 61bd10310604..5803fcbae22f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -948,6 +948,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_VCN: case AMD_IP_BLOCK_TYPE_VCE: + case AMD_IP_BLOCK_TYPE_SDMA: if (swsmu) ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); else @@ -956,7 +957,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block break; case AMD_IP_BLOCK_TYPE_GMC: case AMD_IP_BLOCK_TYPE_ACP: - case AMD_IP_BLOCK_TYPE_SDMA: ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( (adev)->powerplay.pp_handle, block_type, gate)); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index bdf849da32e4..264677ab248a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1012,11 +1012,16 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, /* Navi14 */ - {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, + {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, /* Renoir */ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, + /* Navi12 */ + {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, + {0, 0, 0} }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 7850084a05e3..60655834d649 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -143,7 +143,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, /* ring tests don't use a job */ if (job) { vm = job->vm; - fence_ctx = job->base.s_fence->scheduled.context; + fence_ctx = job->base.s_fence ? 
+ job->base.s_fence->scheduled.context : 0; } else { vm = NULL; fence_ctx = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 0e2ec608530b..f6147528be64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -677,6 +677,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + if (info->read_mmr_reg.count > 128) + return -EINVAL; + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); if (!regs) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index db28823891ac..638c821611ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -70,6 +70,11 @@ MODULE_FIRMWARE("amdgpu/navi10_mec.bin"); MODULE_FIRMWARE("amdgpu/navi10_mec2.bin"); MODULE_FIRMWARE("amdgpu/navi10_rlc.bin"); +MODULE_FIRMWARE("amdgpu/navi14_ce_wks.bin"); +MODULE_FIRMWARE("amdgpu/navi14_pfp_wks.bin"); +MODULE_FIRMWARE("amdgpu/navi14_me_wks.bin"); +MODULE_FIRMWARE("amdgpu/navi14_mec_wks.bin"); +MODULE_FIRMWARE("amdgpu/navi14_mec2_wks.bin"); MODULE_FIRMWARE("amdgpu/navi14_ce.bin"); MODULE_FIRMWARE("amdgpu/navi14_pfp.bin"); MODULE_FIRMWARE("amdgpu/navi14_me.bin"); @@ -594,7 +599,8 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; - char fw_name[30]; + char fw_name[40]; + char wks[10]; int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; @@ -607,12 +613,16 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); + memset(wks, 0, sizeof(wks)); switch (adev->asic_type) { case CHIP_NAVI10: chip_name = "navi10"; break; case CHIP_NAVI14: chip_name = "navi14"; + if (!(adev->pdev->device == 0x7340 && + adev->pdev->revision != 0x00)) + snprintf(wks, sizeof(wks), "_wks"); break; case CHIP_NAVI12: chip_name = "navi12"; @@ -621,7 +631,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks); err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); if (err) goto out; @@ -632,7 +642,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks); err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); if (err) goto out; @@ -643,7 +653,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks); err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); if (err) goto out; @@ -708,7 +718,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) if (adev->gfx.rlc.is_rlc_v2_1) gfx_v10_0_init_rlc_ext_microcode(adev); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", 
chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); if (err) goto out; @@ -719,7 +729,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); if (!err) { err = amdgpu_ucode_validate(adev->gfx.mec2_fw); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 83d45f98a461..dcadc73bffd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1650,7 +1650,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_RENOIR: gfx_v9_0_init_lbpw(adev); break; case CHIP_VEGA20: @@ -3026,7 +3025,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_RENOIR: if (amdgpu_lbpw == 0) gfx_v9_0_enable_lbpw(adev, false); else diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index ff18b3a57892..78452cf0115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1889,8 +1889,9 @@ static int sdma_v4_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_powergating_by_smu) + if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) || + adev->asic_type == CHIP_RENOIR) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); if (!amdgpu_sriov_vf(adev)) @@ -1917,8 +1918,9 @@ static int sdma_v4_0_hw_fini(void *handle) sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); - if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs - && adev->powerplay.pp_funcs->set_powergating_by_smu) + if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs + && adev->powerplay.pp_funcs->set_powergating_by_smu) || + adev->asic_type == CHIP_RENOIR) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index 4a5951036927..c44723c267c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -493,7 +493,15 @@ static void smu_v11_0_i2c_fini(struct i2c_adapter *control) } /* Restore clock gating */ - smu_v11_0_i2c_set_clock_gating(control, true); + + /* + * TODO Reenabling clock gating seems to break subsequent SMU operation + * on the I2C bus. My guess is that SMU doesn't disable clock gating like + * we do here before working with the bus. 
So for now just don't restore + * it but later work with SMU to see if they have this issue and can + * update their code appropriately + */ + /* smu_v11_0_i2c_set_clock_gating(control, true); */ } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index a8cf82d46109..901fe3590165 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -694,10 +694,10 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x003f8000, 0x8f6f896f, 0x88776f77, 0x8a6eff6e, 0x023f8000, 0xb9eef807, - 0xb970f812, 0xb971f813, - 0x8ff08870, 0xf4051bb8, + 0xb97af812, 0xb97bf813, + 0x8ffa887a, 0xf4051bbd, 0xfa000000, 0xbf8cc07f, - 0xf4051c38, 0xfa000008, + 0xf4051ebd, 0xfa000008, 0xbf8cc07f, 0x87ee6e6e, 0xbf840001, 0xbe80206e, 0xb971f803, 0x8771ff71, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 35986219ce5f..cdaa523ce6be 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -187,12 +187,12 @@ L_FETCH_2ND_TRAP: // Read second-level TBA/TMA from first-level TMA and jump if available. // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data) // ttmp12 holds SQ_WAVE_STATUS - s_getreg_b32 ttmp4, hwreg(HW_REG_SHADER_TMA_LO) - s_getreg_b32 ttmp5, hwreg(HW_REG_SHADER_TMA_HI) - s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 - s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA + s_getreg_b32 ttmp14, hwreg(HW_REG_SHADER_TMA_LO) + s_getreg_b32 ttmp15, hwreg(HW_REG_SHADER_TMA_HI) + s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 + s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA s_waitcnt lgkmcnt(0) - s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA + s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA s_waitcnt lgkmcnt(0) s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3] s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e1b09bb432bd..8cab6da512a0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2113,6 +2113,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) } static const struct backlight_ops amdgpu_dm_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, .get_brightness = amdgpu_dm_backlight_get_brightness, .update_status = amdgpu_dm_backlight_update_status, }; @@ -2384,6 +2385,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; + if (adev->asic_type == CHIP_RENOIR) + dm->dc->debug.disable_stutter = true; return 0; fail: @@ -5770,8 +5773,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * change FB pitch, DCC state, rotation or mirroing. 
*/ bundle->flip_addrs[planes_count].flip_immediate = - (crtc->state->pageflip_flags & - DRM_MODE_PAGE_FLIP_ASYNC) != 0 && + crtc->state->async_flip && acrtc_state->update_type == UPDATE_TYPE_FAST; timestamp_ns = ktime_get_ns(); @@ -6348,7 +6350,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) amdgpu_dm_enable_crtc_interrupts(dev, state, true); for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) - if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) + if (new_crtc_state->async_flip) wait_for_vblank = false; /* update planes when needed per crtc*/ diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 383f4f8db8f4..9b2cb57bf2ba 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -708,6 +708,10 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev) { + /* for dali, the highest voltage level we want is 0 */ + if (ASICREV_IS_DALI(hw_internal_rev)) + return 0; + /* we are ok with all levels */ return 4; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index 5cc3acccda2a..b1e657e137a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -98,11 +98,14 @@ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) struct dc_stream_state *stream = context->streams[j]; uint32_t vertical_blank_in_pixels = 0; uint32_t vertical_blank_time = 0; + uint32_t vertical_total_min = stream->timing.v_total; + struct dc_crtc_timing_adjust adjust = stream->adjust; + if (adjust.v_total_max != adjust.v_total_min) + vertical_total_min = adjust.v_total_min; vertical_blank_in_pixels = stream->timing.h_total * - (stream->timing.v_total + (vertical_total_min - stream->timing.v_addressable); - vertical_blank_time = vertical_blank_in_pixels * 10000 / stream->timing.pix_clk_100hz; @@ -171,6 +174,10 @@ void dce11_pplib_apply_display_requirements( struct dc_state *context) { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; + + if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) + memory_type_multiplier = MEMORY_TYPE_HBM; pp_display_cfg->all_displays_in_sync = context->bw_ctx.bw.dce.all_displays_in_sync; @@ -183,8 +190,20 @@ void dce11_pplib_apply_display_requirements( pp_display_cfg->cpu_pstate_separation_time = context->bw_ctx.bw.dce.blackout_recovery_time_us; - pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz - / MEMORY_TYPE_MULTIPLIER_CZ; + /* + * TODO: determine whether the bandwidth has reached memory's limitation + * , then change minimum memory clock based on real-time bandwidth + * limitation. 
+ */ + if (ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) { + pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz, + (uint32_t) div64_s64( + div64_s64(dc->bw_vbios->high_yclk.value, + memory_type_multiplier), 10000)); + } else { + pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz + / memory_type_multiplier; + } pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( dc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 1488ffddf4e3..31b698bf9cfc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -148,7 +148,7 @@ static void dce_mi_program_pte_vm( pte->min_pte_before_flip_horiz_scan; REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, - GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff); + GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0x7f); REG_UPDATE_3(DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH, page_width, @@ -157,7 +157,7 @@ static void dce_mi_program_pte_vm( REG_UPDATE_2(DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk, - DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff); + DVMM_MAX_PTE_REQ_OUTSTANDING, 0x7f); } static void program_urgency_watermark( diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index afc61055eca1..1787b9bf800a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -1091,6 +1091,7 @@ struct resource_pool *dce100_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index c66fe170e1e8..318e9c2e2ca8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -1462,6 +1462,7 @@ struct resource_pool *dce110_create_resource_pool( if (construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 3ac4c7e73050..83e1878161c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -987,6 +987,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) struct dm_pp_clock_levels_with_latency mem_clks = {0}; struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; struct dm_pp_clock_levels clks = {0}; + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; + + if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) + memory_type_multiplier = MEMORY_TYPE_HBM; /*do system clock TODO PPLIB: after PPLIB implement, * then remove old way @@ -1026,12 +1030,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); + clks.clocks_in_khz[0] * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, + clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1] * 
MEMORY_TYPE_MULTIPLIER_CZ, + clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier, 1000); return; @@ -1067,12 +1071,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) * YCLK = UMACLK*m_memoryTypeMultiplier */ dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); + mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select @@ -1338,6 +1342,7 @@ struct resource_pool *dce112_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 7d08154e9662..8b85e5274bba 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -847,6 +847,8 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) int i; unsigned int clk; unsigned int latency; + /*original logic in dal3*/ + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; /*do system clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( @@ -905,13 +907,16 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): * YCLK = UMACLK*m_memoryTypeMultiplier */ + if (dc->bw_vbios->memory_type == bw_def_hbm) + memory_type_multiplier = MEMORY_TYPE_HBM; + dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); + mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select @@ -1203,6 +1208,7 @@ struct resource_pool *dce120_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 5a89e462e7cc..59305e411a66 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1570,6 +1570,7 @@ struct resource_pool *dcn10_create_resource_pool( if (construct(init_data->num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 3ca5139f1273..de182185fe1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -23,6 +23,8 @@ * */ +#include <linux/slab.h> + #include "dm_services.h" #include "dc.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c index 34485d9de78a..8572678f8d4f 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c @@ -35,12 +35,10 @@ #include "hw_factory_dcn21.h" - #include "dcn/dcn_2_1_0_offset.h" #include "dcn/dcn_2_1_0_sh_mask.h" #include "renoir_ip_offset.h" - #include "reg_helper.h" #include "../hpd_regs.h" /* begin ********************* @@ -136,6 +134,39 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 6) }; +#include "../generic_regs.h" + +/* set field name */ +#define SF_GENERIC(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define generic_regs(id) \ +{\ + GENERIC_REG_LIST(id)\ +} + +static const struct generic_registers generic_regs[] = { + generic_regs(A), +}; + +static const struct generic_sh_mask generic_shift[] = { + GENERIC_MASK_SH_LIST(__SHIFT, A), +}; + +static const struct generic_sh_mask generic_mask[] = { + GENERIC_MASK_SH_LIST(_MASK, A), +}; + +static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en) +{ + struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin); + + generic->regs = &generic_regs[en]; + generic->shifts = &generic_shift[en]; + generic->masks = &generic_mask[en]; + generic->base.regs = &generic_regs[en].gpio; +} + static void define_ddc_registers( struct hw_gpio_pin *pin, uint32_t en) @@ -181,7 +212,8 @@ static const struct hw_factory_funcs funcs = { .get_hpd_pin = dal_hw_hpd_get_pin, .get_generic_pin = dal_hw_generic_get_pin, .define_hpd_registers = define_hpd_registers, - .define_ddc_registers = define_ddc_registers + .define_ddc_registers = define_ddc_registers, + .define_generic_registers = define_generic_registers }; /* * dal_hw_factory_dcn10_init diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index ad7c43746291..fbb58fb8c318 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -58,7 +58,6 @@ #define SF_HPD(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix - /* macros to expend register list macro defined in HW object header file * end *********************/ @@ -71,7 +70,7 @@ static bool offset_to_id( { switch (offset) { /* GENERIC */ - case REG(DC_GENERICA): + case REG(DC_GPIO_GENERIC_A): *id = GPIO_ID_GENERIC; switch (mask) { case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK: diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 1cc1c8ce633b..bef224bf803e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -31,6 +31,8 @@ #include "dm_pp_smu.h" #define MEMORY_TYPE_MULTIPLIER_CZ 4 +#define MEMORY_TYPE_HBM 2 + enum dce_version resource_parse_asic_id( struct hw_asic_id asic_id); diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 1f16892f0add..1be6c44fd32f 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -137,10 +137,13 @@ #define RAVEN1_F0 
0xF0 #define RAVEN_UNKNOWN 0xFF +#define PICASSO_15D8_REV_E3 0xE3 +#define PICASSO_15D8_REV_E4 0xE4 + #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) -#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0)) - +#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < PICASSO_15D8_REV_E3)) +#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && (eChipRev < RAVEN1_F0)) #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h index 554714c8e000..094648cac392 100644 --- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h +++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h @@ -155,7 +155,7 @@ static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0x0243FC00, 0x00DC0000 { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE MP1_BASE ={ { { { 0x00016200, 0x02400400, 0x00E80000, 0x00EC0000, 0x00F00000 } }, +static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0x02400400, 0x00E80000, 0x00EC0000, 0x00F00000 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index fa636cb462c1..fa8ad7db2b3a 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -1531,6 +1531,7 @@ static int pp_asic_reset_mode_2(void *handle) static int pp_smu_i2c_bus_access(void *handle, bool acquire) { struct pp_hwmgr *hwmgr = handle; + int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -1540,7 +1541,11 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire) return -EINVAL; } - return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire); + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire); + mutex_unlock(&hwmgr->smu_lock); + + return ret; } static const struct amd_pm_funcs pp_dpm_funcs = { diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 22f3c60d380f..33960fb38a5d 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -354,6 +354,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, case AMD_IP_BLOCK_TYPE_GFX: ret = smu_gfx_off_control(smu, gate); break; + case AMD_IP_BLOCK_TYPE_SDMA: + ret = smu_powergate_sdma(smu, gate); + break; default: break; } diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 2a6da546fb55..e62bfba51562 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -177,12 +177,82 @@ static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, } +static int renoir_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, char *buf) +{ + int i, size = 0, ret = 0; + uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + SmuMetrics_t metrics = {0}; + + if (!clk_table || clk_type >= SMU_CLK_COUNT) + return -EINVAL; + + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)&metrics, false); + if (ret) + return ret; + + switch (clk_type) { + case 
SMU_GFXCLK: + case SMU_SCLK: + /* retirve table returned paramters unit is MHz */ + cur_value = metrics.ClockFrequency[CLOCK_GFXCLK]; + ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max); + if (!ret) { + /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + + size += sprintf(buf + size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sprintf(buf + size, "1: %uMhz %s\n", + i == 1 ? cur_value : RENOIR_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sprintf(buf + size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + } + return size; + case SMU_SOCCLK: + count = NUM_SOCCLK_DPM_LEVELS; + cur_value = metrics.ClockFrequency[CLOCK_SOCCLK]; + break; + case SMU_MCLK: + count = NUM_MEMCLK_DPM_LEVELS; + cur_value = metrics.ClockFrequency[CLOCK_UMCCLK]; + break; + case SMU_DCEFCLK: + count = NUM_DCFCLK_DPM_LEVELS; + cur_value = metrics.ClockFrequency[CLOCK_DCFCLK]; + break; + case SMU_FCLK: + count = NUM_FCLK_DPM_LEVELS; + cur_value = metrics.ClockFrequency[CLOCK_FCLK]; + break; + default: + return -EINVAL; + } + + for (i = 0; i < count; i++) { + GET_DPM_CUR_FREQ(clk_table, clk_type, i, value); + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? "*" : ""); + } + + return size; +} + static const struct pptable_funcs renoir_ppt_funcs = { .get_smu_msg_index = renoir_get_smu_msg_index, .get_smu_table_index = renoir_get_smu_table_index, .tables_init = renoir_tables_init, .set_power_state = NULL, .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited, + .print_clk_levels = renoir_print_clk_levels, }; void renoir_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h index e9b7237c0f7f..2a390ddd37dd 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h @@ -25,4 +25,29 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu); +/* UMD PState Renoir Msg Parameters in MHz */ +#define RENOIR_UMD_PSTATE_GFXCLK 700 +#define RENOIR_UMD_PSTATE_SOCCLK 678 +#define RENOIR_UMD_PSTATE_FCLK 800 + +#define GET_DPM_CUR_FREQ(table, clk_type, dpm_level, freq) \ + do { \ + switch (clk_type) { \ + case SMU_SOCCLK: \ + freq = table->SocClocks[dpm_level].Freq; \ + break; \ + case SMU_MCLK: \ + freq = table->MemClocks[dpm_level].Freq; \ + break; \ + case SMU_DCEFCLK: \ + freq = table->DcfClocks[dpm_level].Freq; \ + break; \ + case SMU_FCLK: \ + freq = table->FClocks[dpm_level].Freq; \ + break; \ + default: \ + break; \ + } \ + } while (0) + #endif diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 3d61c4fb4dec..9e13e466e72c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -874,6 +874,9 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge) &adv7511_connector_helper_funcs); drm_connector_attach_encoder(&adv->connector, bridge->encoder); + if (adv->type == ADV7533) + ret = adv7533_attach_dsi(adv); + if (adv->i2c_main->irq) regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0), ADV7511_INT0_HPD); @@ -1219,17 +1222,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) drm_bridge_add(&adv7511->bridge); adv7511_audio_init(dev, adv7511); - - if (adv7511->type == ADV7533) { - ret = adv7533_attach_dsi(adv7511); - if (ret) - goto err_remove_bridge; - } - return 0; -err_remove_bridge: - 
drm_bridge_remove(&adv7511->bridge); err_unregister_cec: i2c_unregister_device(adv7511->i2c_cec); if (adv7511->cec_clk) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index aa16ea17ff9b..3ef2ac52ce94 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -26,6 +26,7 @@ */ #include <linux/dma-fence.h> +#include <linux/ktime.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -1580,9 +1581,23 @@ static void commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; const struct drm_mode_config_helper_funcs *funcs; + ktime_t start; + s64 commit_time_ms; funcs = dev->mode_config.helper_private; + /* + * We're measuring the _entire_ commit, so the time will vary depending + * on how many fences and objects are involved. For the purposes of self + * refresh, this is desirable since it'll give us an idea of how + * congested things are. This will inform our decision on how often we + * should enter self refresh after idle. + * + * These times will be averaged out in the self refresh helpers to avoid + * overreacting over one outlier frame + */ + start = ktime_get(); + drm_atomic_helper_wait_for_fences(dev, old_state, false); drm_atomic_helper_wait_for_dependencies(old_state); @@ -1592,6 +1607,11 @@ static void commit_tail(struct drm_atomic_state *old_state) else drm_atomic_helper_commit_tail(old_state); + commit_time_ms = ktime_ms_delta(ktime_get(), start); + if (commit_time_ms > 0) + drm_self_refresh_helper_update_avg_times(old_state, + (unsigned long)commit_time_ms); + drm_atomic_helper_commit_cleanup_done(old_state); drm_atomic_state_put(old_state); @@ -3275,7 +3295,7 @@ static int page_flip_common(struct drm_atomic_state *state, return PTR_ERR(crtc_state); crtc_state->event = event; - crtc_state->pageflip_flags = flags; + crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC; plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 46dc264a248b..d0a937fb0c56 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -128,7 +128,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, state->zpos_changed = false; state->commit = NULL; state->event = NULL; - state->pageflip_flags = 0; + state->async_flip = false; /* Self refresh should be canceled when a new update is available */ state->active = drm_atomic_crtc_effectively_active(state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 5a5b42db6f2a..7a26bfb5329c 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -1305,8 +1305,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, if (arg->reserved) return -EINVAL; - if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && - !dev->mode_config.async_page_flip) + if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) return -EINVAL; /* can't test and expect an event at the same time. 
*/ diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index c456c3d3def2..769feefeeeef 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -976,14 +976,14 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (ret) goto err_minors; + dev->registered = true; + if (dev->driver->load) { ret = dev->driver->load(dev, flags); if (ret) goto err_minors; } - dev->registered = true; - if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_register_all(dev); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f675a3bb2c88..fcd728d7cf72 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -336,7 +336,12 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) case DRM_CLIENT_CAP_ATOMIC: if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) return -EOPNOTSUPP; - if (req->value > 1) + /* The modesetting DDX has a totally broken idea of atomic. */ + if (current->comm[0] == 'X' && req->value == 1) { + pr_info("broken atomic modeset userspace detected, disabling atomic\n"); + return -EOPNOTSUPP; + } + if (req->value > 2) return -EINVAL; file_priv->atomic = req->value; file_priv->universal_planes = req->value; diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index c355ba8e6d5d..6a23e36ed4fe 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -42,7 +42,7 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj, { int ret; - WARN_ON(dev->registered && !obj_free_cb); + WARN_ON(!dev->driver->load && dev->registered && !obj_free_cb); mutex_lock(&dev->mode_config.idr_mutex); ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL, @@ -104,7 +104,7 @@ void drm_mode_object_register(struct drm_device *dev, void drm_mode_object_unregister(struct drm_device *dev, struct drm_mode_object *object) { - WARN_ON(dev->registered && !object->free_cb); + WARN_ON(!dev->driver->load && dev->registered && !object->free_cb); mutex_lock(&dev->mode_config.idr_mutex); if (object->id) { diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c index 4b9424a8f1f1..68f4765a5896 100644 --- a/drivers/gpu/drm/drm_self_refresh_helper.c +++ b/drivers/gpu/drm/drm_self_refresh_helper.c @@ -5,6 +5,7 @@ * Authors: * Sean Paul <seanpaul@chromium.org> */ +#include <linux/average.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/workqueue.h> @@ -50,11 +51,17 @@ * atomic_check when &drm_crtc_state.self_refresh_active is true. */ +#define SELF_REFRESH_AVG_SEED_MS 200 + +DECLARE_EWMA(psr_time, 4, 4) + struct drm_self_refresh_data { struct drm_crtc *crtc; struct delayed_work entry_work; - struct drm_atomic_state *save_state; - unsigned int entry_delay_ms; + + struct mutex avg_mutex; + struct ewma_psr_time entry_avg_ms; + struct ewma_psr_time exit_avg_ms; }; static void drm_self_refresh_helper_entry_work(struct work_struct *work) @@ -123,6 +130,44 @@ out_drop_locks: } /** + * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages + * @state: the state which has just been applied to hardware + * @commit_time_ms: the amount of time in ms that this commit took to complete + * + * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will + * update the average entry/exit self refresh times on self refresh transitions. 
+ * These averages will be used when calculating how long to delay before + * entering self refresh mode after activity. + */ +void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, + unsigned int commit_time_ms) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + int i; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; + struct ewma_psr_time *time; + + if (old_crtc_state->self_refresh_active == + new_crtc_state->self_refresh_active) + continue; + + if (new_crtc_state->self_refresh_active) + time = &sr_data->entry_avg_ms; + else + time = &sr_data->exit_avg_ms; + + mutex_lock(&sr_data->avg_mutex); + ewma_psr_time_add(time, commit_time_ms); + mutex_unlock(&sr_data->avg_mutex); + } +} +EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times); + +/** * drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit * @state: the state currently being checked * @@ -153,6 +198,7 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state) for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct drm_self_refresh_data *sr_data; + unsigned int delay; /* Don't trigger the entry timer when we're already in SR */ if (crtc_state->self_refresh_active) @@ -162,8 +208,13 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state) if (!sr_data) continue; + mutex_lock(&sr_data->avg_mutex); + delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) + + ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2; + mutex_unlock(&sr_data->avg_mutex); + mod_delayed_work(system_wq, &sr_data->entry_work, - msecs_to_jiffies(sr_data->entry_delay_ms)); + msecs_to_jiffies(delay)); } } EXPORT_SYMBOL(drm_self_refresh_helper_alter_state); @@ -171,12 +222,10 @@ EXPORT_SYMBOL(drm_self_refresh_helper_alter_state); /** * drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc * @crtc: the crtc which supports self refresh supported displays - * @entry_delay_ms: amount of inactivity to wait before entering self refresh * * Returns zero if successful or -errno on failure */ -int drm_self_refresh_helper_init(struct drm_crtc *crtc, - unsigned int entry_delay_ms) +int drm_self_refresh_helper_init(struct drm_crtc *crtc) { struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; @@ -190,8 +239,18 @@ int drm_self_refresh_helper_init(struct drm_crtc *crtc, INIT_DELAYED_WORK(&sr_data->entry_work, drm_self_refresh_helper_entry_work); - sr_data->entry_delay_ms = entry_delay_ms; sr_data->crtc = crtc; + mutex_init(&sr_data->avg_mutex); + ewma_psr_time_init(&sr_data->entry_avg_ms); + ewma_psr_time_init(&sr_data->exit_avg_ms); + + /* + * Seed the averages so they're non-zero (and sufficiently large + * for even poorly performing panels). As time goes on, this will be + * averaged out and the values will trend to their true value. 
+ */ + ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS); + ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS); crtc->self_refresh_data = sr_data; return 0; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 2db029371c91..5193b6257061 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -267,7 +267,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset, asyw->image.pitch[0] = fb->base.pitches[0]; } - if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) + if (!asyh->state.async_flip) asyw->image.interval = 1; else asyw->image.interval = 0; @@ -383,7 +383,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, } /* Can't do an immediate flip while changing the LUT. */ - asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC; + asyh->state.async_flip = false; } static int diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index a1f5fa6a742a..12ff77dacc95 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -39,7 +39,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, * If frequency scaling from low to high, adjust voltage first. * If frequency scaling from high to low, adjust frequency first. */ - if (old_clk_rate < target_rate && pfdev->regulator) { + if (old_clk_rate < target_rate) { err = regulator_set_voltage(pfdev->regulator, target_volt, target_volt); if (err) { @@ -53,14 +53,12 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, if (err) { dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate, err); - if (pfdev->regulator) - regulator_set_voltage(pfdev->regulator, - pfdev->devfreq.cur_volt, - pfdev->devfreq.cur_volt); + regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt, + pfdev->devfreq.cur_volt); return err; } - if (old_clk_rate > target_rate && pfdev->regulator) { + if (old_clk_rate > target_rate) { err = regulator_set_voltage(pfdev->regulator, target_volt, target_volt); if (err) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index 46b0b02e4289..238fb6d54df4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -89,12 +89,9 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) { int ret; - pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali"); + pfdev->regulator = devm_regulator_get(pfdev->dev, "mali"); if (IS_ERR(pfdev->regulator)) { ret = PTR_ERR(pfdev->regulator); - pfdev->regulator = NULL; - if (ret == -ENODEV) - return 0; dev_err(pfdev->dev, "failed to get regulator: %d\n", ret); return ret; } @@ -110,8 +107,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) static void panfrost_regulator_fini(struct panfrost_device *pfdev) { - if (pfdev->regulator) - regulator_disable(pfdev->regulator); + regulator_disable(pfdev->regulator); } int panfrost_device_init(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 6010f9ee7c1f..bdd990568476 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -394,28 +394,40 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) free_io_pgtable_ops(mmu->pgtbl_ops); } -static struct drm_mm_node *addr_to_drm_mm_node(struct 
panfrost_device *pfdev, int as, u64 addr) +static struct panfrost_gem_object * +addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) { - struct drm_mm_node *node = NULL; + struct panfrost_gem_object *bo = NULL; + struct panfrost_file_priv *priv; + struct drm_mm_node *node; u64 offset = addr >> PAGE_SHIFT; struct panfrost_mmu *mmu; spin_lock(&pfdev->as_lock); list_for_each_entry(mmu, &pfdev->as_lru_list, list) { - struct panfrost_file_priv *priv; - if (as != mmu->as) - continue; + if (as == mmu->as) + break; + } + if (as != mmu->as) + goto out; + + priv = container_of(mmu, struct panfrost_file_priv, mmu); - priv = container_of(mmu, struct panfrost_file_priv, mmu); - drm_mm_for_each_node(node, &priv->mm) { - if (offset >= node->start && offset < (node->start + node->size)) - goto out; + spin_lock(&priv->mm_lock); + + drm_mm_for_each_node(node, &priv->mm) { + if (offset >= node->start && + offset < (node->start + node->size)) { + bo = drm_mm_node_to_panfrost_bo(node); + drm_gem_object_get(&bo->base.base); + break; } } + spin_unlock(&priv->mm_lock); out: spin_unlock(&pfdev->as_lock); - return node; + return bo; } #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) @@ -423,29 +435,28 @@ out: int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) { int ret, i; - struct drm_mm_node *node; struct panfrost_gem_object *bo; struct address_space *mapping; pgoff_t page_offset; struct sg_table *sgt; struct page **pages; - node = addr_to_drm_mm_node(pfdev, as, addr); - if (!node) + bo = addr_to_drm_mm_node(pfdev, as, addr); + if (!bo) return -ENOENT; - bo = drm_mm_node_to_panfrost_bo(node); if (!bo->is_heap) { dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", - node->start << PAGE_SHIFT); - return -EINVAL; + bo->node.start << PAGE_SHIFT); + ret = -EINVAL; + goto err_bo; } WARN_ON(bo->mmu->as != as); /* Assume 2MB alignment and size multiple */ addr &= ~((u64)SZ_2M - 1); page_offset = addr >> PAGE_SHIFT; - page_offset -= node->start; + page_offset -= bo->node.start; mutex_lock(&bo->base.pages_lock); @@ -454,7 +465,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); if (!bo->sgts) { mutex_unlock(&bo->base.pages_lock); - return -ENOMEM; + ret = -ENOMEM; + goto err_bo; } pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, @@ -463,7 +475,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) kfree(bo->sgts); bo->sgts = NULL; mutex_unlock(&bo->base.pages_lock); - return -ENOMEM; + ret = -ENOMEM; + goto err_bo; } bo->base.pages = pages; bo->base.pages_use_count = 1; @@ -501,12 +514,16 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); + drm_gem_object_put_unlocked(&bo->base.base); + return 0; err_map: sg_free_table(sgt); err_pages: drm_gem_shmem_put_pages(&bo->base); +err_bo: + drm_gem_object_put_unlocked(&bo->base.base); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 431e6b64b77d..d0bc91ed7c90 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -324,8 +324,39 @@ bool radeon_device_is_virtual(void); static int radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + unsigned long flags = 0; int ret; + if (!ent) + return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */ + + flags = ent->driver_data; + + if 
(!radeon_si_support) { + switch (flags & RADEON_FAMILY_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + dev_info(&pdev->dev, + "SI support disabled by module param\n"); + return -ENODEV; + } + } + if (!radeon_cik_support) { + switch (flags & RADEON_FAMILY_MASK) { + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: + dev_info(&pdev->dev, + "CIK support disabled by module param\n"); + return -ENODEV; + } + } + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 07f7ace42c4b..e85c554eeaa9 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -100,31 +100,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) struct radeon_device *rdev; int r, acpi_status; - if (!radeon_si_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - dev_info(dev->dev, - "SI support disabled by module param\n"); - return -ENODEV; - } - } - if (!radeon_cik_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: - dev_info(dev->dev, - "CIK support disabled by module param\n"); - return -ENODEV; - } - } - rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); if (rdev == NULL) { return -ENOMEM; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 2f821c58007c..613404f86668 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -39,8 +39,6 @@ #include "rockchip_drm_vop.h" #include "rockchip_rgb.h" -#define VOP_SELF_REFRESH_ENTRY_DELAY_MS 100 - #define VOP_WIN_SET(vop, win, name, v) \ vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name) #define VOP_SCL_SET(vop, win, name, v) \ @@ -1563,8 +1561,7 @@ static int vop_create_crtc(struct vop *vop) init_completion(&vop->line_flag_completion); crtc->port = port; - ret = drm_self_refresh_helper_init(crtc, - VOP_SELF_REFRESH_ENTRY_DELAY_MS); + ret = drm_self_refresh_helper_init(crtc); if (ret) DRM_DEV_DEBUG_KMS(vop->dev, "Failed to init %s with SR helpers %d, ignoring\n", |