Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 498
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 128
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mca_v3_0.c | 125
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mca_v3_0.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 209
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 114
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 68
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 106
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 192
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 96
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 72
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 56
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 56
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 49
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/hwmgr.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 63
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 59
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c | 18
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 80
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c | 30
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 86
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 206
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 97
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 67
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 107
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 48
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 49
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 102
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 14
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 18
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 13
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 31
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dma.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_module.c | 37
-rw-r--r--  drivers/gpu/drm/mediatek/Makefile | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_aal.c | 167
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_drv.h | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 173
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 42
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 59
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 44
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.h | 3
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 29
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 131
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 34
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 40
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 43
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 102
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 85
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 11
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_catalog.c | 8
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 141
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 77
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.c | 50
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 9
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_power.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 15
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 18
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 74
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 33
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 3
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 161
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 146
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 83
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 8
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 190
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 40
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 3
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 358
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 17
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 98
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 12
-rw-r--r--  drivers/gpu/drm/tegra/firewall.c | 254
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 13
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/plane.c | 117
-rw-r--r--  drivers/gpu/drm/tegra/plane.h | 16
-rw-r--r--  drivers/gpu/drm/tegra/submit.c | 625
-rw-r--r--  drivers/gpu/drm/tegra/submit.h | 21
-rw-r--r--  drivers/gpu/drm/tegra/uapi.c | 338
-rw-r--r--  drivers/gpu/drm/tegra/uapi.h | 58
-rw-r--r--  drivers/gpu/drm/tegra/vic.c | 112
-rw-r--r--  drivers/gpu/host1x/Makefile | 1
-rw-r--r--  drivers/gpu/host1x/cdma.c | 58
-rw-r--r--  drivers/gpu/host1x/fence.c | 168
-rw-r--r--  drivers/gpu/host1x/fence.h | 13
-rw-r--r--  drivers/gpu/host1x/hw/channel_hw.c | 87
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw.c | 32
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw_1x01.c | 8
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw_1x06.c | 16
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x02_uclass.h | 12
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x04_uclass.h | 12
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x05_uclass.h | 12
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x06_uclass.h | 12
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x07_uclass.h | 12
-rw-r--r--  drivers/gpu/host1x/intr.c | 9
-rw-r--r--  drivers/gpu/host1x/intr.h | 2
-rw-r--r--  drivers/gpu/host1x/job.c | 98
-rw-r--r--  drivers/gpu/host1x/job.h | 16
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 2
-rw-r--r--  drivers/gpu/host1x/syncpt.h | 12
271 files changed, 7381 insertions, 2540 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0d372354c2d0..cea777ae7fb9 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -256,7 +256,6 @@ config DRM_AMDGPU
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
- select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 0d814c957461..8d0748184a14 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -58,7 +58,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
- amdgpu_eeprom.o
+ amdgpu_eeprom.o amdgpu_mca.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -189,6 +189,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_reset.o
+# add MCA block
+amdgpu-y += \
+ mca_v3_0.o
+
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 96e895d6be35..dc3c6b3a00e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -108,6 +108,7 @@
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
+#include "amdgpu_mca.h"
#define MAX_GPU_INSTANCE 16
@@ -1009,6 +1010,9 @@ struct amdgpu_device {
/* df */
struct amdgpu_df df;
+ /* MCA */
+ struct amdgpu_mca mca;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
uint32_t harvest_ip_mask;
int num_ip_blocks;
@@ -1271,6 +1275,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+
/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 260ba01d303e..4811b0faafd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
*/
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
if (adev->flags & AMD_IS_APU)
return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 7b46ba551cb2..3003ee1c9487 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -714,7 +714,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
ret = dma_fence_wait(f, false);
err_ib_sched:
- dma_fence_put(f);
amdgpu_job_free(job);
err:
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index a5434b713856..46cd4ee6bafb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -44,4 +44,5 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 6409d6b1b2df..5a7f680bcb3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -305,5 +305,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base =
kgd_gfx_v9_set_vm_context_page_table_base,
- .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 491acdf92f73..960acf68150a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -560,6 +560,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+ type = SAVE_WAVES;
+ break;
default:
type = DRAIN_PIPE;
break;
@@ -754,6 +757,33 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
+static void program_trap_handler_settings(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ /*
+ * Program TBA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+ lower_32_bits(tba_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+ upper_32_bits(tba_addr >> 8) |
+ (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+ /*
+ * Program TMA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+ lower_32_bits(tma_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+ upper_32_bits(tma_addr >> 8));
+
+ unlock_srbm(kgd);
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -774,4 +804,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .program_trap_handler_settings = program_trap_handler_settings,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 1f5620cc3570..dac0d751d5af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -537,6 +537,9 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+ type = SAVE_WAVES;
+ break;
default:
type = DRAIN_PIPE;
break;
@@ -658,6 +661,33 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
+static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ /*
+ * Program TBA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+ lower_32_bits(tba_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+ upper_32_bits(tba_addr >> 8) |
+ (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+ /*
+ * Program TMA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+ lower_32_bits(tma_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+ upper_32_bits(tma_addr >> 8));
+
+ unlock_srbm(kgd);
+}
+
#if 0
uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
uint32_t trap_debug_wave_launch_mode,
@@ -820,6 +850,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.address_watch_get_offset = address_watch_get_offset_v10_3,
.get_atc_vmid_pasid_mapping_info = NULL,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
+ .program_trap_handler_settings = program_trap_handler_settings_v10_3,
#if 0
.enable_debug_trap = enable_debug_trap_v10_3,
.disable_debug_trap = disable_debug_trap_v10_3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index ed3014fbb563..bcc1cbeb8799 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -42,7 +42,8 @@
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
- RESET_WAVES
+ RESET_WAVES,
+ SAVE_WAVES
};
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@@ -566,6 +567,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+ type = SAVE_WAVES;
+ break;
default:
type = DRAIN_PIPE;
break;
@@ -878,6 +882,32 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
adev->gfx.cu_info.max_waves_per_simd;
}
+void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ /*
+ * Program TBA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+ lower_32_bits(tba_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+ upper_32_bits(tba_addr >> 8));
+
+ /*
+ * Program TMA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+ lower_32_bits(tma_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+ upper_32_bits(tma_addr >> 8));
+
+ unlock_srbm(kgd);
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -899,4 +929,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index e64deba8900f..c63591106879 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -65,3 +65,5 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
int *pasid_wave_cnt, int *max_waves_per_cu);
+void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
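The three program_trap_handler_settings() implementations above (GFX v9, v10 and v10.3) share one pattern: the 256-byte-aligned trap-handler base (TBA) and trap-memory (TMA) virtual addresses are shifted right by 8 and split across a LO/HI 32-bit register pair, with the v10 variants additionally setting TRAP_EN in TBA_HI. A minimal standalone sketch of that packing follows; the helper name is illustrative and not part of the patch:

static inline void pack_trap_addr(uint64_t addr, bool trap_en,
				  uint32_t *lo, uint32_t *hi)
{
	uint64_t shifted = addr >> 8;	/* registers hold addr[n:8] */

	*lo = lower_32_bits(shifted);
	*hi = upper_32_bits(shifted);
	if (trap_en)			/* only the v10/v10.3 TBA_HI case */
		*hi |= 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;
}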
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 8f53837d4d3e..97178b307ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -468,14 +468,18 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}
-/*
- * Helper function to query RAS EEPROM address
- *
- * @adev: amdgpu_device pointer
+/**
+ * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
+ * @adev: amdgpu_device pointer
+ * @i2c_address: pointer to u8; if not NULL, will contain
+ * the RAS EEPROM address if the function returns true
*
- * Return true if vbios supports ras rom address reporting
+ * Return true if VBIOS supports RAS EEPROM address reporting,
+ * else return false. If true and @i2c_address is not NULL,
+ * it will contain the RAS ROM address.
*/
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
+ u8 *i2c_address)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index;
@@ -483,27 +487,39 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
union firmware_info *firmware_info;
u8 frev, crev;
- if (i2c_address == NULL)
- return false;
-
- *i2c_address = 0;
-
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
+ firmwareinfo);
if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
- index, &size, &frev, &crev, &data_offset)) {
+ index, &size, &frev, &crev,
+ &data_offset)) {
/* support firmware_info 3.4 + */
if ((frev == 3 && crev >=4) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
- *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ /* The ras_rom_i2c_slave_addr should ideally
+ * be a 19-bit EEPROM address, which would be
+ * used as is by the driver; see top of
+ * amdgpu_eeprom.c.
+ *
+ * When this is the case, 0 is of course a
+ * valid RAS EEPROM address, in which case,
+ * we'll drop the first "if (firm...)" and only
+ * leave the check for the pointer.
+ *
+ * The reason this works right now is because
+ * ras_rom_i2c_slave_addr contains the EEPROM
+ * device type qualifier 1010b in the top 4
+ * bits.
+ */
+ if (firmware_info->v34.ras_rom_i2c_slave_addr) {
+ if (i2c_address)
+ *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ return true;
+ }
}
}
- if (*i2c_address != 0)
- return true;
-
return false;
}
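As the long comment in the hunk above explains, treating a non-zero ras_rom_i2c_slave_addr as "address reported" only works because the byte carries the EEPROM device-type qualifier 1010b in its top four bits, so any reported address is necessarily non-zero. A hedged sketch of that invariant; the helper name is hypothetical:

static bool ras_rom_addr_reported(u8 slave_addr)
{
	/* addresses reported by firmware_info v3.4+ carry the 1010b
	 * (0xA) EEPROM device-type qualifier in the top four bits */
	return (slave_addr >> 4) == 0xA;
}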
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 536005bff24a..277128846dd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1414,7 +1414,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && job->fence == fence)
+ if (preempted && (&job->hw_fence) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d7cc45e68dbd..41c6b3aacd37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2829,12 +2829,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
- mutex_lock(&adev->gfx.gfx_off_mutex);
- if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
- adev->gfx.gfx_off_state = true;
- }
- mutex_unlock(&adev->gfx.gfx_off_mutex);
+ WARN_ON_ONCE(adev->gfx.gfx_off_state);
+ WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+ adev->gfx.gfx_off_state = true;
}
/**
@@ -3826,7 +3825,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
- ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ if (adev->mman.initialized) {
+ flush_delayed_work(&adev->mman.bdev.wq);
+ ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ }
adev->shutdown = true;
/* make sure IB test finished before entering exclusive mode
@@ -4448,7 +4450,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
- int i, r = 0;
+ int i, j, r = 0;
struct amdgpu_job *job = NULL;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4472,6 +4474,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;
+ /* Clear job fences from the fence driver before force_completion,
+ * leaving only NULL and VM flush fences in the fence driver. */
+ for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
+ struct dma_fence *old, **ptr;
+
+ ptr = &ring->fence_drv.fences[j];
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
+ RCU_INIT_POINTER(*ptr, NULL);
+ }
+ }
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 43e7b61d1c5c..ada7bc19118a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
ip->major, ip->minor,
ip->revision);
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+ adev->vcn.num_vcn_inst++;
+
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
struct harvest_table *harvest_info;
- int i;
+ int i, vcn_harvest_count = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
- adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
- adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ vcn_harvest_count++;
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
break;
}
}
+ if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
}
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8e5a7ac8c36f..7a7316731911 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -522,6 +522,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RENOIR:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b6640291f980..f18240f87387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1181,7 +1181,12 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1197,6 +1202,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
/* DIMGREY_CAVEFISH */
@@ -1204,6 +1214,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
/* Aldebaran */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 5a143ca02cf9..cd0acbea75da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -273,9 +273,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
return 0;
out:
- if (abo) {
-
- }
if (fb && ret) {
drm_gem_object_put(gobj);
drm_framebuffer_unregister_private(fb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index d94c5419ec25..5a6857c44bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -59,6 +59,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
+ struct amdgpu_bo *root;
int ret;
ret = amdgpu_file_to_fpriv(f, &fpriv);
@@ -69,13 +70,19 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
- ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
+ root = amdgpu_bo_ref(fpriv->vm.root.bo);
+ if (!root)
+ return;
+
+ ret = amdgpu_bo_reserve(root, false);
if (ret) {
DRM_ERROR("Fail to reserve bo\n");
return;
}
amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
- amdgpu_bo_unreserve(fpriv->vm.root.bo);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+
seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
dev, fn, fpriv->vm.pasid);
seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
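The fdinfo hunk above guards against the VM root BO being freed mid-read by taking a reference before reserving it. The pattern, reduced to its essentials (a sketch, not the patch itself):

struct amdgpu_bo *root = amdgpu_bo_ref(fpriv->vm.root.bo);

if (root && !amdgpu_bo_reserve(root, false)) {
	/* ... safely read per-VM memory statistics ... */
	amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);	/* amdgpu_bo_unref() tolerates NULL */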
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 6ed53669f3e0..8d682befe0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -129,30 +129,50 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
+ * @job: job the fence is embedded in
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
unsigned flags)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_fence *fence;
+ struct dma_fence *fence;
+ struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
- fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
- if (fence == NULL)
- return -ENOMEM;
+ if (job == NULL) {
+ /* create a sperate hw fence */
+ am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
+ if (am_fence == NULL)
+ return -ENOMEM;
+ fence = &am_fence->base;
+ am_fence->ring = ring;
+ } else {
+ /* use the job-embedded fence */
+ fence = &job->hw_fence;
+ }
seq = ++ring->fence_drv.sync_seq;
- fence->ring = ring;
- dma_fence_init(&fence->base, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
+ if (job != NULL && job->job_run_counter) {
+ /* reinit seq for resubmitted jobs */
+ fence->seqno = seq;
+ } else {
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
+ }
+
+ if (job != NULL) {
+ /* mark this fence as embedded in a job */
+ set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+ }
+
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
@@ -175,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
- rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
+ rcu_assign_pointer(*ptr, dma_fence_get(fence));
- *f = &fence->base;
+ *f = fence;
return 0;
}
@@ -532,6 +552,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler)
+ drm_sched_stop(&ring->sched, NULL);
+
/* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(&adev->ddev))
r = amdgpu_fence_wait_empty(ring);
@@ -591,6 +614,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler) {
+ drm_sched_resubmit_jobs(&ring->sched);
+ drm_sched_start(&ring->sched, true);
+ }
+
/* enable the interrupt */
if (ring->fence_drv.irq_src)
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
@@ -621,8 +649,16 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- return (const char *)fence->ring->name;
+ struct amdgpu_ring *ring;
+
+ if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+ ring = to_amdgpu_ring(job->base.sched);
+ } else {
+ ring = to_amdgpu_fence(f)->ring;
+ }
+ return (const char *)ring->name;
}
/**
@@ -635,13 +671,20 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- struct amdgpu_ring *ring = fence->ring;
+ struct amdgpu_ring *ring;
+
+ if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+ ring = to_amdgpu_ring(job->base.sched);
+ } else {
+ ring = to_amdgpu_fence(f)->ring;
+ }
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+ DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
return true;
}
@@ -656,8 +699,20 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- kmem_cache_free(amdgpu_fence_slab, fence);
+
+ if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+ /* free job if fence has a parent job */
+ struct amdgpu_job *job;
+
+ job = container_of(f, struct amdgpu_job, hw_fence);
+ kfree(job);
+ } else {
+ /* free back to the fence slab if it's a separate fence */
+ struct amdgpu_fence *fence;
+
+ fence = to_amdgpu_fence(f);
+ kmem_cache_free(amdgpu_fence_slab, fence);
+ }
}
/**
@@ -680,6 +735,7 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
+
/*
* Fence debugfs
*/
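With fences now living either in the slab or embedded in a job, several of the dma_fence_ops above repeat the same branch to recover the ring. One way to factor it, assuming only the flag and field names introduced by this patch:

static struct amdgpu_ring *amdgpu_fence_to_ring(struct dma_fence *f)
{
	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
		/* fence is embedded in an amdgpu_job */
		struct amdgpu_job *job =
			container_of(f, struct amdgpu_job, hw_fence);

		return to_amdgpu_ring(job->base.sched);
	}
	/* standalone fence allocated from amdgpu_fence_slab */
	return to_amdgpu_fence(f)->ring;
}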
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b36405170ff3..76efd5f8950f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -76,7 +76,7 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
if (adev->dummy_page_addr)
return 0;
adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
adev->dummy_page_addr = 0;
@@ -96,8 +96,8 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
if (!adev->dummy_page_addr)
return;
- pci_unmap_page(adev->pdev, adev->dummy_page_addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
adev->dummy_page_addr = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index c6f2fb9557ff..d6aa032890ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -341,21 +341,18 @@ retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
+ if (r && r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
- size, initial_domain, args->in.alignment, r);
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
}
- return r;
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
@@ -904,7 +901,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
- domain = amdgpu_bo_get_preferred_pin_domain(adev,
+ domain = amdgpu_bo_get_preferred_domain(adev,
amdgpu_display_supported_domains(adev, flags));
r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
ttm_bo_type_device, NULL, &gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a0be0772c8b3..e7f06bd0f0cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->gfx.gfx_off_mutex);
- if (!enable)
- adev->gfx.gfx_off_req_count++;
- else if (adev->gfx.gfx_off_req_count > 0)
+ if (enable) {
+ /* If the count is already 0, it means there's an imbalance bug somewhere.
+ * Note that the bug may be in a different caller than the one which triggers the
+ * WARN_ON_ONCE.
+ */
+ if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+ goto unlock;
+
adev->gfx.gfx_off_req_count--;
- if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
- } else if (!enable && adev->gfx.gfx_off_state) {
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
- adev->gfx.gfx_off_state = false;
+ if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+ } else {
+ if (adev->gfx.gfx_off_req_count == 0) {
+ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+ if (adev->gfx.gfx_off_state &&
+ !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+ adev->gfx.gfx_off_state = false;
- if (adev->gfx.funcs->init_spm_golden) {
- dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
- amdgpu_gfx_init_spm_golden(adev);
+ if (adev->gfx.funcs->init_spm_golden) {
+ dev_dbg(adev->dev,
+ "GFXOFF is disabled, re-init SPM golden settings\n");
+ amdgpu_gfx_init_spm_golden(adev);
+ }
}
}
+
+ adev->gfx.gfx_off_req_count++;
}
+unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
@@ -615,7 +629,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gfx.ras_if->sub_block_index = 0;
- strcpy(adev->gfx.ras_if->name, "gfx");
}
fs_info.head = ih_info.head = *adev->gfx.ras_if;
r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
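The reworked amdgpu_gfx_off_ctrl() above enforces a strict refcount discipline: a disable request bumps gfx_off_req_count and synchronously cancels the delayed enable work, while an enable request drops the count and only arms the delayed work when it reaches zero. Stripped of the SMU and SPM details, the shape is roughly (a sketch; names follow the hunk):

static void gfx_off_ctrl_sketch(struct amdgpu_device *adev, bool enable)
{
	mutex_lock(&adev->gfx.gfx_off_mutex);
	if (enable) {
		/* a count of 0 here means an imbalanced caller somewhere */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;
		if (--adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state)
			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
					      GFX_OFF_DELAY_ENABLE);
	} else {
		/* first disable request cancels any pending delayed enable */
		if (adev->gfx.gfx_off_req_count++ == 0)
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
	}
unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}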
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index d0b8d415b63b..c7797eac83c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -471,6 +471,27 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
return r;
}
+ if (adev->mca.mp0.ras_funcs &&
+ adev->mca.mp0.ras_funcs->ras_late_init) {
+ r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mca.mp1.ras_funcs &&
+ adev->mca.mp1.ras_funcs->ras_late_init) {
+ r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mca.mpio.ras_funcs &&
+ adev->mca.mpio.ras_funcs->ras_late_init) {
+ r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 543000304a1c..675a72ef305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -118,7 +118,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Dummy, allocate the node but no space for it yet.
*/
@@ -182,7 +182,7 @@ err_out:
* amdgpu_gtt_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated GTT again.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index 1d50d534d77c..a766e1aad2b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -41,7 +41,6 @@ int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP;
adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->hdp.ras_if->sub_block_index = 0;
- strcpy(adev->hdp.ras_if->name, "hdp");
}
ih_info.head = fs_info.head = *adev->hdp.ras_if;
r = amdgpu_ras_late_init(adev, adev->hdp.ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index bca4dddd5a15..82608df43396 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
void
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
- u8 val;
+ u8 val = 0;
if (!amdgpu_connector->router.ddc_valid)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ec65ab0ddf89..c076a6b9a5a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -262,7 +262,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
- r = amdgpu_fence_emit(ring, f, fence_flags);
+ r = amdgpu_fence_emit(ring, f, job, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d33e6d97cc89..de29518673dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
+ struct dma_fence *hw_fence;
unsigned i;
- /* use sched fence if available */
- f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+ if (job->hw_fence.ops == NULL)
+ hw_fence = job->external_hw_fence;
+ else
+ hw_fence = &job->hw_fence;
+ /* use sched fence if available */
+ f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
@@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
drm_sched_job_cleanup(s_job);
- dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
- kfree(job);
+
+ /* only put the hw fence if the job has an embedded fence */
+ if (job->hw_fence.ops != NULL)
+ dma_fence_put(&job->hw_fence);
+ else
+ kfree(job);
}
void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
-
- dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
- kfree(job);
+
+ /* only put the hw fence if the job has an embedded fence */
+ if (job->hw_fence.ops != NULL)
+ dma_fence_put(&job->hw_fence);
+ else
+ kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->base.sched = &ring->sched;
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
- job->fence = dma_fence_get(*fence);
+ /* record external_hw_fence for direct submit */
+ job->external_hw_fence = dma_fence_get(*fence);
if (r)
return r;
amdgpu_job_free(job);
+ dma_fence_put(*fence);
+
return 0;
}
@@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
}
- /* if gpu reset, hw fence will be replaced here */
- dma_fence_put(job->fence);
- job->fence = dma_fence_get(fence);
+ if (!job->job_run_counter)
+ dma_fence_get(fence);
+ else if (finished->error < 0)
+ dma_fence_put(&job->hw_fence);
+ job->job_run_counter++;
amdgpu_job_free_resources(job);
fence = r ? ERR_PTR(r) : fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 81caac9b958a..9e65730193b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -46,7 +46,8 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_sync sched_sync;
struct amdgpu_ib *ibs;
- struct dma_fence *fence; /* the hw fence */
+ struct dma_fence hw_fence;
+ struct dma_fence *external_hw_fence;
uint32_t preamble_status;
uint32_t preemption_status;
uint32_t num_ibs;
@@ -62,6 +63,9 @@ struct amdgpu_job {
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;
+
+ /* job_run_counter >= 1 means a resubmitted job */
+ uint32_t job_run_counter;
};
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
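Taken together, the amdgpu_job changes above give a job one of two fence lifetimes: an embedded hw_fence (initialized once, whose release path kfree()s the whole job) or, for direct submits, a referenced external_hw_fence. A hedged sketch of the resulting free rule, mirroring amdgpu_job_free_cb():

static void job_release_sketch(struct amdgpu_job *job)
{
	if (job->hw_fence.ops)
		/* embedded fence was initialized; the job's memory is
		 * reclaimed in amdgpu_fence_free() when the last
		 * reference drops */
		dma_fence_put(&job->hw_fence);
	else
		/* fence never emitted; nothing else owns the job */
		kfree(job);
}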
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 8996cb4ed57a..9342aa23ebd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -47,8 +47,6 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
{
int i;
- cancel_delayed_work_sync(&adev->jpeg.idle_work);
-
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 20b049ad61c1..7e45640fbee0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -341,27 +341,27 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
switch (query_fw->index) {
case TA_FW_TYPE_PSP_XGMI:
fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ta_xgmi_ucode_version;
+ fw_info->feature = adev->psp.xgmi.feature_version;
break;
case TA_FW_TYPE_PSP_RAS:
fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ta_ras_ucode_version;
+ fw_info->feature = adev->psp.ras.feature_version;
break;
case TA_FW_TYPE_PSP_HDCP:
fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+ fw_info->feature = adev->psp.hdcp.feature_version;
break;
case TA_FW_TYPE_PSP_DTM:
fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ta_dtm_ucode_version;
+ fw_info->feature = adev->psp.dtm.feature_version;
break;
case TA_FW_TYPE_PSP_RAP:
fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ta_rap_ucode_version;
+ fw_info->feature = adev->psp.rap.feature_version;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
+ fw_info->feature = adev->psp.securedisplay.feature_version;
break;
default:
return -EINVAL;
@@ -378,8 +378,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = adev->psp.sos.feature_version;
break;
case AMDGPU_INFO_FW_ASD:
- fw_info->ver = adev->psp.asd_fw_version;
- fw_info->feature = adev->psp.asd_feature_version;
+ fw_info->ver = adev->psp.asd.fw_version;
+ fw_info->feature = adev->psp.asd.feature_version;
break;
case AMDGPU_INFO_FW_DMCU:
fw_info->ver = adev->dm.dmcu_fw_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
new file mode 100644
index 000000000000..a2d3dbbf7d25
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+#include "amdgpu_mca.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count)
+{
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+
+ if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count)
+{
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+
+ if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr)
+{
+ WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
+}
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
+ amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));
+
+ amdgpu_mca_reset_error_count(adev, mc_status_addr);
+}
+
+int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
+ struct amdgpu_mca_ras *mca_dev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = mca_dev->ras_funcs->sysfs_name,
+ };
+
+ if (!mca_dev->ras_if) {
+ mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!mca_dev->ras_if)
+ return -ENOMEM;
+ mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
+ mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ mca_dev->ras_if->sub_block_index = 0;
+ }
+ ih_info.head = fs_info.head = *mca_dev->ras_if;
+ r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, mca_dev->ras_if->block)) {
+ kfree(mca_dev->ras_if);
+ mca_dev->ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
+ struct amdgpu_mca_ras *mca_dev)
+{
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ if (!mca_dev->ras_if)
+ return;
+
+ amdgpu_ras_late_fini(adev, mca_dev->ras_if, &ih_info);
+ kfree(mca_dev->ras_if);
+ mca_dev->ras_if = NULL;
+}
\ No newline at end of file
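A short usage sketch for the new helpers: a block with a known MCA status register address accumulates CE/UE counts into a ras_err_data and lets the helper clear the status afterwards. The register address below is a placeholder, not a real offset:

/* Sketch: report one MCA bank's errors through the common helpers. */
static void example_query_block_errors(struct amdgpu_device *adev,
				       void *ras_error_status)
{
	uint64_t example_mc_status_addr = 0x1000; /* placeholder offset */

	/* fills ce_count/ue_count in ras_error_status, then clears the
	 * status register via amdgpu_mca_reset_error_count() */
	amdgpu_mca_query_ras_error_count(adev, example_mc_status_addr,
					 ras_error_status);
}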
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
new file mode 100644
index 000000000000..f860f2f0e296
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_MCA_H__
+#define __AMDGPU_MCA_H__
+
+struct amdgpu_mca_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*query_ras_error_address)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ uint32_t ras_block;
+ const char *sysfs_name;
+};
+
+struct amdgpu_mca_ras {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_mca_ras_funcs *ras_funcs;
+};
+
+struct amdgpu_mca_funcs {
+ void (*init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mca {
+ const struct amdgpu_mca_funcs *funcs;
+ struct amdgpu_mca_ras mp0;
+ struct amdgpu_mca_ras mp1;
+ struct amdgpu_mca_ras mpio;
+};
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count);
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count);
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr);
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ void *ras_error_status);
+
+int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
+ struct amdgpu_mca_ras *mca_dev);
+
+void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
+ struct amdgpu_mca_ras *mca_dev);
+
+#endif
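The expected wiring, sketched under assumed names: an ASIC-specific file describes each MCA block (MP0/MP1/MPIO) with an amdgpu_mca_ras_funcs table and points adev->mca.mp0.ras_funcs at it. Everything prefixed example_ is illustrative; only the struct layout comes from the patch:

static int example_mp0_ras_late_init(struct amdgpu_device *adev)
{
	return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
}

static void example_mp0_ras_fini(struct amdgpu_device *adev)
{
	amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
}

/* hypothetical MP0 table; install with
 * adev->mca.mp0.ras_funcs = &example_mp0_ras_funcs; */
static const struct amdgpu_mca_ras_funcs example_mp0_ras_funcs = {
	.ras_late_init = example_mp0_ras_late_init,
	.ras_fini = example_mp0_ras_fini,
	.query_ras_error_count = NULL, /* ASIC-specific in practice */
	.query_ras_error_address = NULL,
	.ras_block = AMDGPU_RAS_BLOCK__MP0,
	.sysfs_name = "mp0_err_count",
};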
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
index ead3dc572ec5..24297dc51434 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -41,7 +41,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->mmhub.ras_if->sub_block_index = 0;
- strcpy(adev->mmhub.ras_if->name, "mmhub");
}
ih_info.head = fs_info.head = *adev->mmhub.ras_if;
r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 6201a5f4b4fa..6afb02fef8cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -39,7 +39,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->nbio.ras_if->sub_block_index = 0;
- strcpy(adev->nbio.ras_if->name, "pcie_bif");
}
ih_info.head = fs_info.head = *adev->nbio.ras_if;
r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d15eee98204d..01a78c786536 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
}
- /* This assumes only APU display buffers are pinned with (VRAM|GTT).
- * See function amdgpu_display_supported_domains()
- */
- domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.resource->mem_type;
uint32_t mem_flags = bo->tbo.resource->placement;
@@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+ /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+ * See function amdgpu_display_supported_domains()
+ */
+ domain = amdgpu_bo_get_preferred_domain(adev, domain);
+
if (bo->tbo.base.import_attach)
dma_buf_pin(bo->tbo.base.import_attach);
@@ -1518,14 +1518,14 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
}
/**
- * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
*
* Returns:
- * Which of the allowed domains is preferred for pinning the BO for scanout.
+ * Which of the allowed domains is preferred for allocating the BO.
*/
-uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain)
{
if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index e72f329e7f18..9d6c001c15f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -333,7 +333,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
-uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
/*
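With the rename, callers outside the pinning path can use the helper when allocating display-capable buffers. A usage sketch; the surrounding function is illustrative:

/* Sketch: pre-select a placement for a buffer that may be scanned out.
 * For a (VRAM | GTT) request the helper collapses the mask to a single
 * preferred domain based on the device's VRAM configuration. */
static uint32_t example_pick_domain(struct amdgpu_device *adev)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;

	return amdgpu_bo_get_preferred_domain(adev, domain);
}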
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index f2e20666c9c1..4eaec446b49d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -80,12 +80,17 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
* Calculate feedback and reference divider for a given post divider. Makes
* sure we stay within the limits.
*/
-static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
- unsigned fb_div_max, unsigned ref_div_max,
- unsigned *fb_div, unsigned *ref_div)
+static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
+ unsigned int den, unsigned int post_div,
+ unsigned int fb_div_max, unsigned int ref_div_max,
+ unsigned int *fb_div, unsigned int *ref_div)
{
+
/* limit reference * post divider to a maximum */
- ref_div_max = min(128 / post_div, ref_div_max);
+ if (adev->family == AMDGPU_FAMILY_SI)
+ ref_div_max = min(100 / post_div, ref_div_max);
+ else
+ ref_div_max = min(128 / post_div, ref_div_max);
/* get matching reference and feedback divider */
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
@@ -112,7 +117,8 @@ static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_
* Try to calculate the PLL parameters to generate the given frequency:
* dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
*/
-void amdgpu_pll_compute(struct amdgpu_pll *pll,
+void amdgpu_pll_compute(struct amdgpu_device *adev,
+ struct amdgpu_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
@@ -199,7 +205,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
unsigned diff;
- amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
+ amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
ref_div_max, &fb_div, &ref_div);
diff = abs(target_clock - (pll->reference_freq * fb_div) /
(ref_div * post_div));
@@ -214,7 +220,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
post_div = post_div_best;
/* get the feedback and reference divider for the optimal value */
- amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+ amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
&fb_div, &ref_div);
/* reduce the numbers to a simpler ratio once more */
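The SI special case only changes the clamp on reference times post divider. Worked numbers, restated as a stand-alone sketch: with post_div = 4 and ref_div_max = 64, a non-SI part allows ref_div up to min(128 / 4, 64) = 32, while SI allows min(100 / 4, 64) = 25:

#include <linux/types.h>
#include <linux/minmax.h>

/* Stand-alone restatement of the clamp added above (sketch only). */
static unsigned int example_ref_div_cap(bool is_si, unsigned int post_div,
					unsigned int ref_div_max)
{
	unsigned int limit = is_si ? 100 : 128;

	return min(limit / post_div, ref_div_max);
}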
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
index db6136f68b82..44a583d6c9b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
@@ -24,7 +24,8 @@
#ifndef __AMDGPU_PLL_H__
#define __AMDGPU_PLL_H__
-void amdgpu_pll_compute(struct amdgpu_pll *pll,
+void amdgpu_pll_compute(struct amdgpu_device *adev,
+ struct amdgpu_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9dc2d6d9712a..9b41cb8c3de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
@@ -468,10 +469,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
- DRM_WARN("failed to load ucode (%s) ",
- amdgpu_ucode_name(ucode->ucode_id));
- DRM_WARN("psp gfx command (%s) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ DRM_WARN("failed to load ucode %s(0x%X) ",
+ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+ DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
if (!timeout) {
ret = -EINVAL;
@@ -799,15 +800,15 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes)
return 0;
cmd = acquire_psp_cmd_buf(psp);
- psp_copy_fw(psp, psp->asd_start_addr, psp->asd_ucode_size);
+ psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes);
psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->asd_ucode_size);
+ psp->asd.size_bytes);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -898,23 +899,37 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}
-static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+static int psp_ta_init_shared_buf(struct psp_context *psp,
+ struct ta_mem_context *mem_ctx,
+ uint32_t shared_mem_size)
{
int ret;
/*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for xgmi ta <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->xgmi_context.xgmi_shared_bo,
- &psp->xgmi_context.xgmi_shared_mc_addr,
- &psp->xgmi_context.xgmi_shared_buf);
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for TA <-> driver exchange
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &mem_ctx->shared_bo,
+ &mem_ctx->shared_mc_addr,
+ &mem_ctx->shared_buf);
return ret;
}
+static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
+{
+ amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
+ &mem_ctx->shared_buf);
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+ return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context,
+ PSP_XGMI_SHARED_MEM_SIZE);
+}
+
static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint32_t ta_cmd_id,
uint32_t session_id)
@@ -952,20 +967,20 @@ static int psp_xgmi_load(struct psp_context *psp)
cmd = acquire_psp_cmd_buf(psp);
- psp_copy_fw(psp, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+ psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
- psp->ta_xgmi_ucode_size,
- psp->xgmi_context.xgmi_shared_mc_addr,
+ psp->xgmi.size_bytes,
+ psp->xgmi_context.context.mem_context.shared_mc_addr,
PSP_XGMI_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
- psp->xgmi_context.initialized = 1;
- psp->xgmi_context.session_id = cmd->resp.session_id;
+ psp->xgmi_context.context.initialized = true;
+ psp->xgmi_context.context.session_id = cmd->resp.session_id;
}
release_psp_cmd_buf(psp);
@@ -990,7 +1005,7 @@ static int psp_xgmi_unload(struct psp_context *psp)
cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -1002,41 +1017,42 @@ static int psp_xgmi_unload(struct psp_context *psp)
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.context.session_id);
}
int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
- if (!psp->xgmi_context.initialized)
+ if (!psp->xgmi_context.context.initialized)
return 0;
ret = psp_xgmi_unload(psp);
if (ret)
return ret;
- psp->xgmi_context.initialized = 0;
+ psp->xgmi_context.context.initialized = false;
/* free xgmi shared memory */
- amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
- &psp->xgmi_context.xgmi_shared_mc_addr,
- &psp->xgmi_context.xgmi_shared_buf);
+ psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
return 0;
}
-int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- if (!psp->adev->psp.ta_fw ||
- !psp->adev->psp.ta_xgmi_ucode_size ||
- !psp->adev->psp.ta_xgmi_start_addr)
+ if (!psp->ta_fw ||
+ !psp->xgmi.size_bytes ||
+ !psp->xgmi.start_addr)
return -ENOENT;
- if (!psp->xgmi_context.initialized) {
+ if (!load_ta)
+ goto invoke;
+
+ if (!psp->xgmi_context.context.initialized) {
ret = psp_xgmi_init_shared_buf(psp);
if (ret)
return ret;
@@ -1047,9 +1063,11 @@ int psp_xgmi_initialize(struct psp_context *psp)
if (ret)
return ret;
+invoke:
/* Initialize XGMI session */
- xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+ xgmi_cmd->flag_extend_link_record = set_extended_data;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
@@ -1062,7 +1080,7 @@ int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
@@ -1082,7 +1100,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
@@ -1100,12 +1118,59 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
return psp->adev->asic_type == CHIP_ALDEBARAN &&
- psp->ta_xgmi_ucode_version >= 0x2000000b;
+ psp->xgmi.feature_version >= 0x2000000b;
+}
+
+/*
+ * Chips that support extended topology information require the driver to
+ * reflect topology information in the opposite direction. This is
+ * because the TA has already exceeded its link record limit; if the
+ * TA also held bi-directional information, the driver would have to
+ * do multiple fetches instead of just two.
+ */
+static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
+ struct psp_xgmi_node_info node_info)
+{
+ struct amdgpu_device *mirror_adev;
+ struct amdgpu_hive_info *hive;
+ uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
+ uint64_t dst_node_id = node_info.node_id;
+ uint8_t dst_num_hops = node_info.num_hops;
+ uint8_t dst_num_links = node_info.num_links;
+
+ hive = amdgpu_get_xgmi_hive(psp->adev);
+ list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
+ struct psp_xgmi_topology_info *mirror_top_info;
+ int j;
+
+ if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
+ continue;
+
+ mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
+ for (j = 0; j < mirror_top_info->num_nodes; j++) {
+ if (mirror_top_info->nodes[j].node_id != src_node_id)
+ continue;
+
+ mirror_top_info->nodes[j].num_hops = dst_num_hops;
+ /*
+ * prevent re-reflecting a 0 num_links value, since the
+ * reflection criterion is based on num_hops (direct or
+ * indirect).
+ */
+ if (dst_num_links)
+ mirror_top_info->nodes[j].num_links = dst_num_links;
+
+ break;
+ }
+
+ break;
+ }
}
int psp_xgmi_get_topology_info(struct psp_context *psp,
int number_devices,
- struct psp_xgmi_topology_info *topology)
+ struct psp_xgmi_topology_info *topology,
+ bool get_extended_data)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
@@ -1116,8 +1181,9 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
- xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+ xgmi_cmd->flag_extend_link_record = get_extended_data;
/* Fill in the shared memory with topology information as input */
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@@ -1140,10 +1206,19 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
for (i = 0; i < topology->num_nodes; i++) {
- topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
- topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
- topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
- topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+ /* extended data will either be 0 or equal to non-extended data */
+ if (topology_info_output->nodes[i].num_hops)
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+
+ /* non-extended data gets everything here so no need to update */
+ if (!get_extended_data) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].is_sharing_enabled =
+ topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine =
+ topology_info_output->nodes[i].sdma_engine;
+ }
+
}
/* Invoke xgmi ta again to get the link information */
@@ -1158,9 +1233,18 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
return ret;
link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
- for (i = 0; i < topology->num_nodes; i++)
- topology->nodes[i].num_links =
+ for (i = 0; i < topology->num_nodes; i++) {
+ /* accumulate num_links on extended data */
+ topology->nodes[i].num_links = get_extended_data ?
+ topology->nodes[i].num_links +
+ link_info_output->nodes[i].num_links :
link_info_output->nodes[i].num_links;
+
+ /* reflect the topology information for bi-directionality */
+ if (psp->xgmi_context.supports_extended_data &&
+ get_extended_data && topology->nodes[i].num_hops)
+ psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
+ }
}
return 0;
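A sketch of the intended two-pass call pattern, inferred from the new flags; the helper name and flow are assumptions, not the in-tree caller:

static int example_fetch_full_topology(struct psp_context *psp, int count,
				       struct psp_xgmi_topology_info *top)
{
	/* pass 1: load the TA and fetch the base topology */
	int ret = psp_xgmi_initialize(psp, false, true);

	if (ret)
		return ret;

	ret = psp_xgmi_get_topology_info(psp, count, top, false);
	if (ret)
		return ret;

	/* pass 2: re-initialize the session for extended link records
	 * without reloading the TA; num_links is accumulated and the
	 * result reflected to peer devices */
	if (psp->xgmi_context.supports_extended_data) {
		ret = psp_xgmi_initialize(psp, true, false);
		if (!ret)
			ret = psp_xgmi_get_topology_info(psp, count, top, true);
	}

	return ret;
}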
@@ -1177,7 +1261,7 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
- xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@@ -1198,19 +1282,8 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
- int ret;
-
- /*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for ras ta <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->ras.ras_shared_bo,
- &psp->ras.ras_shared_mc_addr,
- &psp->ras.ras_shared_buf);
-
- return ret;
+ return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context,
+ PSP_RAS_SHARED_MEM_SIZE);
}
static int psp_ras_load(struct psp_context *psp)
@@ -1225,9 +1298,9 @@ static int psp_ras_load(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- psp_copy_fw(psp, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
+ psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes);
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
if (psp->adev->gmc.xgmi.connected_to_cpu)
ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
@@ -1238,18 +1311,18 @@ static int psp_ras_load(struct psp_context *psp)
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
- psp->ta_ras_ucode_size,
- psp->ras.ras_shared_mc_addr,
+ psp->ras.size_bytes,
+ psp->ras_context.context.mem_context.shared_mc_addr,
PSP_RAS_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
- psp->ras.session_id = cmd->resp.session_id;
+ psp->ras_context.context.session_id = cmd->resp.session_id;
if (!ras_cmd->ras_status)
- psp->ras.ras_initialized = true;
+ psp->ras_context.context.initialized = true;
else
dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
}
@@ -1275,7 +1348,7 @@ static int psp_ras_unload(struct psp_context *psp)
cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -1290,7 +1363,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
struct ta_ras_shared_memory *ras_cmd;
int ret;
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
/*
* TODO: bypass the loading in sriov for now
@@ -1298,7 +1371,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras_context.context.session_id);
if (amdgpu_ras_intr_triggered())
return ret;
@@ -1354,10 +1427,10 @@ int psp_ras_enable_features(struct psp_context *psp,
struct ta_ras_shared_memory *ras_cmd;
int ret;
- if (!psp->ras.ras_initialized)
+ if (!psp->ras_context.context.initialized)
return -EINVAL;
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
if (enable)
@@ -1384,19 +1457,17 @@ static int psp_ras_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->ras.ras_initialized)
+ if (!psp->ras_context.context.initialized)
return 0;
ret = psp_ras_unload(psp);
if (ret)
return ret;
- psp->ras.ras_initialized = false;
+ psp->ras_context.context.initialized = false;
/* free ras shared memory */
- amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
- &psp->ras.ras_shared_mc_addr,
- &psp->ras.ras_shared_buf);
+ psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
return 0;
}
@@ -1413,8 +1484,8 @@ static int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(adev))
return 0;
- if (!adev->psp.ta_ras_ucode_size ||
- !adev->psp.ta_ras_start_addr) {
+ if (!adev->psp.ras.size_bytes ||
+ !adev->psp.ras.start_addr) {
dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@@ -1460,7 +1531,7 @@ static int psp_ras_initialize(struct psp_context *psp)
}
}
- if (!psp->ras.ras_initialized) {
+ if (!psp->ras_context.context.initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
return ret;
@@ -1479,10 +1550,10 @@ int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_shared_memory *ras_cmd;
int ret;
- if (!psp->ras.ras_initialized)
+ if (!psp->ras_context.context.initialized)
return -EINVAL;
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
@@ -1504,19 +1575,8 @@ int psp_ras_trigger_error(struct psp_context *psp,
// HDCP start
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
- int ret;
-
- /*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for hdcp ta <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->hdcp_context.hdcp_shared_bo,
- &psp->hdcp_context.hdcp_shared_mc_addr,
- &psp->hdcp_context.hdcp_shared_buf);
-
- return ret;
+ return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context,
+ PSP_HDCP_SHARED_MEM_SIZE);
}
static int psp_hdcp_load(struct psp_context *psp)
@@ -1530,22 +1590,22 @@ static int psp_hdcp_load(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- psp_copy_fw(psp, psp->ta_hdcp_start_addr,
- psp->ta_hdcp_ucode_size);
+ psp_copy_fw(psp, psp->hdcp.start_addr,
+ psp->hdcp.size_bytes);
cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
- psp->ta_hdcp_ucode_size,
- psp->hdcp_context.hdcp_shared_mc_addr,
+ psp->hdcp.size_bytes,
+ psp->hdcp_context.context.mem_context.shared_mc_addr,
PSP_HDCP_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->hdcp_context.hdcp_initialized = true;
- psp->hdcp_context.session_id = cmd->resp.session_id;
+ psp->hdcp_context.context.initialized = true;
+ psp->hdcp_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->hdcp_context.mutex);
}
@@ -1563,13 +1623,13 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->adev->psp.ta_hdcp_ucode_size ||
- !psp->adev->psp.ta_hdcp_start_addr) {
+ if (!psp->hdcp.size_bytes ||
+ !psp->hdcp.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
- if (!psp->hdcp_context.hdcp_initialized) {
+ if (!psp->hdcp_context.context.initialized) {
ret = psp_hdcp_init_shared_buf(psp);
if (ret)
return ret;
@@ -1595,7 +1655,7 @@ static int psp_hdcp_unload(struct psp_context *psp)
cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
@@ -1612,7 +1672,7 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.context.session_id);
}
static int psp_hdcp_terminate(struct psp_context *psp)
@@ -1625,8 +1685,8 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp_context.hdcp_initialized) {
- if (psp->hdcp_context.hdcp_shared_buf)
+ if (!psp->hdcp_context.context.initialized) {
+ if (psp->hdcp_context.context.mem_context.shared_buf)
goto out;
else
return 0;
@@ -1636,13 +1696,11 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->hdcp_context.hdcp_initialized = false;
+ psp->hdcp_context.context.initialized = false;
out:
/* free hdcp shared memory */
- amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
- &psp->hdcp_context.hdcp_shared_mc_addr,
- &psp->hdcp_context.hdcp_shared_buf);
+ psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
return 0;
}
@@ -1651,19 +1709,8 @@ out:
// DTM start
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
- int ret;
-
- /*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for dtm ta <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->dtm_context.dtm_shared_bo,
- &psp->dtm_context.dtm_shared_mc_addr,
- &psp->dtm_context.dtm_shared_buf);
-
- return ret;
+ return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context,
+ PSP_DTM_SHARED_MEM_SIZE);
}
static int psp_dtm_load(struct psp_context *psp)
@@ -1677,21 +1724,21 @@ static int psp_dtm_load(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- psp_copy_fw(psp, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+ psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes);
cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
- psp->ta_dtm_ucode_size,
- psp->dtm_context.dtm_shared_mc_addr,
+ psp->dtm.size_bytes,
+ psp->dtm_context.context.mem_context.shared_mc_addr,
PSP_DTM_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->dtm_context.dtm_initialized = true;
- psp->dtm_context.session_id = cmd->resp.session_id;
+ psp->dtm_context.context.initialized = true;
+ psp->dtm_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->dtm_context.mutex);
}
@@ -1710,13 +1757,13 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->adev->psp.ta_dtm_ucode_size ||
- !psp->adev->psp.ta_dtm_start_addr) {
+ if (!psp->dtm.size_bytes ||
+ !psp->dtm.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
- if (!psp->dtm_context.dtm_initialized) {
+ if (!psp->dtm_context.context.initialized) {
ret = psp_dtm_init_shared_buf(psp);
if (ret)
return ret;
@@ -1742,7 +1789,7 @@ static int psp_dtm_unload(struct psp_context *psp)
cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
@@ -1759,7 +1806,7 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.context.session_id);
}
static int psp_dtm_terminate(struct psp_context *psp)
@@ -1772,8 +1819,8 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm_context.dtm_initialized) {
- if (psp->dtm_context.dtm_shared_buf)
+ if (!psp->dtm_context.context.initialized) {
+ if (psp->dtm_context.context.mem_context.shared_buf)
goto out;
else
return 0;
@@ -1783,13 +1830,11 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->dtm_context.dtm_initialized = false;
+ psp->dtm_context.context.initialized = false;
out:
- /* free hdcp shared memory */
- amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
- &psp->dtm_context.dtm_shared_mc_addr,
- &psp->dtm_context.dtm_shared_buf);
+ /* free dtm shared memory */
+ psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
return 0;
}
@@ -1798,19 +1843,8 @@ out:
// RAP start
static int psp_rap_init_shared_buf(struct psp_context *psp)
{
- int ret;
-
- /*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for rap ta <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->rap_context.rap_shared_bo,
- &psp->rap_context.rap_shared_mc_addr,
- &psp->rap_context.rap_shared_buf);
-
- return ret;
+ return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context,
+ PSP_RAP_SHARED_MEM_SIZE);
}
static int psp_rap_load(struct psp_context *psp)
@@ -1818,21 +1852,21 @@ static int psp_rap_load(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd;
- psp_copy_fw(psp, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
+ psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes);
cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
- psp->ta_rap_ucode_size,
- psp->rap_context.rap_shared_mc_addr,
+ psp->rap.size_bytes,
+ psp->rap_context.context.mem_context.shared_mc_addr,
PSP_RAP_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->rap_context.rap_initialized = true;
- psp->rap_context.session_id = cmd->resp.session_id;
+ psp->rap_context.context.initialized = true;
+ psp->rap_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->rap_context.mutex);
}
@@ -1846,7 +1880,7 @@ static int psp_rap_unload(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
@@ -1866,13 +1900,13 @@ static int psp_rap_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->adev->psp.ta_rap_ucode_size ||
- !psp->adev->psp.ta_rap_start_addr) {
+ if (!psp->rap.size_bytes ||
+ !psp->rap.start_addr) {
dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
return 0;
}
- if (!psp->rap_context.rap_initialized) {
+ if (!psp->rap_context.context.initialized) {
ret = psp_rap_init_shared_buf(psp);
if (ret)
return ret;
@@ -1886,11 +1920,9 @@ static int psp_rap_initialize(struct psp_context *psp)
if (ret || status != TA_RAP_STATUS__SUCCESS) {
psp_rap_unload(psp);
- amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
- &psp->rap_context.rap_shared_mc_addr,
- &psp->rap_context.rap_shared_buf);
+ psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
- psp->rap_context.rap_initialized = false;
+ psp->rap_context.context.initialized = false;
dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
ret, status);
@@ -1905,17 +1937,15 @@ static int psp_rap_terminate(struct psp_context *psp)
{
int ret;
- if (!psp->rap_context.rap_initialized)
+ if (!psp->rap_context.context.initialized)
return 0;
ret = psp_rap_unload(psp);
- psp->rap_context.rap_initialized = false;
+ psp->rap_context.context.initialized = false;
/* free rap shared memory */
- amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
- &psp->rap_context.rap_shared_mc_addr,
- &psp->rap_context.rap_shared_buf);
+ psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
return ret;
}
@@ -1925,7 +1955,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
struct ta_rap_shared_memory *rap_cmd;
int ret = 0;
- if (!psp->rap_context.rap_initialized)
+ if (!psp->rap_context.context.initialized)
return 0;
if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
@@ -1935,13 +1965,13 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
mutex_lock(&psp->rap_context.mutex);
rap_cmd = (struct ta_rap_shared_memory *)
- psp->rap_context.rap_shared_buf;
+ psp->rap_context.context.mem_context.shared_buf;
memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
rap_cmd->cmd_id = ta_cmd_id;
rap_cmd->validation_method_id = METHOD_A;
- ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
+ ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.context.session_id);
if (ret)
goto out_unlock;
@@ -1958,19 +1988,9 @@ out_unlock:
/* securedisplay start */
static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
{
- int ret;
-
- /*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for sa ta <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->securedisplay_context.securedisplay_shared_bo,
- &psp->securedisplay_context.securedisplay_shared_mc_addr,
- &psp->securedisplay_context.securedisplay_shared_buf);
-
- return ret;
+ return psp_ta_init_shared_buf(
+ psp, &psp->securedisplay_context.context.mem_context,
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
}
static int psp_securedisplay_load(struct psp_context *psp)
@@ -1979,19 +1999,19 @@ static int psp_securedisplay_load(struct psp_context *psp)
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+ memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
- psp->ta_securedisplay_ucode_size,
- psp->securedisplay_context.securedisplay_shared_mc_addr,
+ psp->securedisplay.size_bytes,
+ psp->securedisplay_context.context.mem_context.shared_mc_addr,
PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->securedisplay_context.securedisplay_initialized = true;
- psp->securedisplay_context.session_id = cmd->resp.session_id;
+ psp->securedisplay_context.context.initialized = true;
+ psp->securedisplay_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->securedisplay_context.mutex);
}
@@ -2005,7 +2025,7 @@ static int psp_securedisplay_unload(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
@@ -2025,13 +2045,13 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->adev->psp.ta_securedisplay_ucode_size ||
- !psp->adev->psp.ta_securedisplay_start_addr) {
+ if (!psp->securedisplay.size_bytes ||
+ !psp->securedisplay.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
return 0;
}
- if (!psp->securedisplay_context.securedisplay_initialized) {
+ if (!psp->securedisplay_context.context.initialized) {
ret = psp_securedisplay_init_shared_buf(psp);
if (ret)
return ret;
@@ -2048,11 +2068,9 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (ret) {
psp_securedisplay_unload(psp);
- amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
- &psp->securedisplay_context.securedisplay_shared_mc_addr,
- &psp->securedisplay_context.securedisplay_shared_buf);
+ psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
- psp->securedisplay_context.securedisplay_initialized = false;
+ psp->securedisplay_context.context.initialized = false;
dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
return -EINVAL;
@@ -2077,19 +2095,17 @@ static int psp_securedisplay_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->securedisplay_context.securedisplay_initialized)
+ if (!psp->securedisplay_context.context.initialized)
return 0;
ret = psp_securedisplay_unload(psp);
if (ret)
return ret;
- psp->securedisplay_context.securedisplay_initialized = false;
+ psp->securedisplay_context.context.initialized = false;
/* free securedisplay shared memory */
- amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
- &psp->securedisplay_context.securedisplay_shared_mc_addr,
- &psp->securedisplay_context.securedisplay_shared_buf);
+ psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
return ret;
}
@@ -2098,7 +2114,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
int ret;
- if (!psp->securedisplay_context.securedisplay_initialized)
+ if (!psp->securedisplay_context.context.initialized)
return -EINVAL;
if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
@@ -2107,7 +2123,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
mutex_lock(&psp->securedisplay_context.mutex);
- ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.context.session_id);
mutex_unlock(&psp->securedisplay_context.mutex);
@@ -2420,7 +2436,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
struct amdgpu_firmware_info *ucode =
&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- struct amdgpu_ras *ras = psp->ras.ras;
+ struct amdgpu_ras *ras = psp->ras_context.ras;
if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
return 0;
@@ -2625,7 +2641,7 @@ skip_memalloc:
return ret;
}
- if (psp->adev->psp.ta_fw) {
+ if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
@@ -2697,7 +2713,7 @@ static int psp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- if (psp->adev->psp.ta_fw) {
+ if (psp->ta_fw) {
psp_ras_terminate(psp);
psp_securedisplay_terminate(psp);
psp_rap_terminate(psp);
@@ -2727,7 +2743,7 @@ static int psp_suspend(void *handle)
struct psp_context *psp = &adev->psp;
if (adev->gmc.xgmi.num_physical_nodes > 1 &&
- psp->xgmi_context.initialized == 1) {
+ psp->xgmi_context.context.initialized) {
ret = psp_xgmi_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate xgmi ta\n");
@@ -2735,7 +2751,7 @@ static int psp_suspend(void *handle)
}
}
- if (psp->adev->psp.ta_fw) {
+ if (psp->ta_fw) {
ret = psp_ras_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate ras ta\n");
@@ -2817,7 +2833,7 @@ static int psp_resume(void *handle)
}
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- ret = psp_xgmi_initialize(psp);
+ ret = psp_xgmi_initialize(psp, false, true);
/* Warn on XGMI session initialization failure
* instead of stopping driver initialization
*/
@@ -2826,7 +2842,7 @@ static int psp_resume(void *handle)
"XGMI: Failed to initialize XGMI session\n");
}
- if (psp->adev->psp.ta_fw) {
+ if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
@@ -2978,10 +2994,10 @@ int psp_init_asd_microcode(struct psp_context *psp,
goto out;
asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
- adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
+ adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd.start_addr = (uint8_t *)asd_hdr +
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:
@@ -3123,6 +3139,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr->sos.offset_bytes);
+ adev->psp.xgmi_context.supports_extended_data = false;
} else {
/* Load alternate PSP SOS FW */
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
@@ -3137,6 +3154,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
+ adev->psp.xgmi_context.supports_extended_data = true;
}
if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
@@ -3266,40 +3284,40 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd_fw_version = le32_to_cpu(desc->fw_version);
- psp->asd_feature_version = le32_to_cpu(desc->fw_version);
- psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
- psp->asd_start_addr = ucode_start_addr;
+ psp->asd.fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd.feature_version = le32_to_cpu(desc->fw_version);
+ psp->asd.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->asd.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_XGMI:
- psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
- psp->ta_xgmi_ucode_size = le32_to_cpu(desc->size_bytes);
- psp->ta_xgmi_start_addr = ucode_start_addr;
+ psp->xgmi.feature_version = le32_to_cpu(desc->fw_version);
+ psp->xgmi.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->xgmi.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAS:
- psp->ta_ras_ucode_version = le32_to_cpu(desc->fw_version);
- psp->ta_ras_ucode_size = le32_to_cpu(desc->size_bytes);
- psp->ta_ras_start_addr = ucode_start_addr;
+ psp->ras.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ras.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_HDCP:
- psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
- psp->ta_hdcp_ucode_size = le32_to_cpu(desc->size_bytes);
- psp->ta_hdcp_start_addr = ucode_start_addr;
+ psp->hdcp.feature_version = le32_to_cpu(desc->fw_version);
+ psp->hdcp.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->hdcp.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_DTM:
- psp->ta_dtm_ucode_version = le32_to_cpu(desc->fw_version);
- psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
- psp->ta_dtm_start_addr = ucode_start_addr;
+ psp->dtm.feature_version = le32_to_cpu(desc->fw_version);
+ psp->dtm.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->dtm.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAP:
- psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version);
- psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
- psp->ta_rap_start_addr = ucode_start_addr;
+ psp->rap.feature_version = le32_to_cpu(desc->fw_version);
+ psp->rap.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->rap.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
- psp->ta_securedisplay_ucode_version = le32_to_cpu(desc->fw_version);
- psp->ta_securedisplay_ucode_size = le32_to_cpu(desc->size_bytes);
- psp->ta_securedisplay_start_addr = ucode_start_addr;
+ psp->securedisplay.feature_version = le32_to_cpu(desc->fw_version);
+ psp->securedisplay.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->securedisplay.start_addr = ucode_start_addr;
break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 6b1645598fa3..8ef2d28af92a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -136,59 +136,32 @@ struct psp_asd_context {
uint32_t session_id;
};
-struct psp_xgmi_context {
- uint8_t initialized;
- uint32_t session_id;
- struct amdgpu_bo *xgmi_shared_bo;
- uint64_t xgmi_shared_mc_addr;
- void *xgmi_shared_buf;
- struct psp_xgmi_topology_info top_info;
+struct ta_mem_context {
+ struct amdgpu_bo *shared_bo;
+ uint64_t shared_mc_addr;
+ void *shared_buf;
};
-struct psp_ras_context {
- /*ras fw*/
- bool ras_initialized;
+struct ta_context {
+ bool initialized;
uint32_t session_id;
- struct amdgpu_bo *ras_shared_bo;
- uint64_t ras_shared_mc_addr;
- void *ras_shared_buf;
- struct amdgpu_ras *ras;
+ struct ta_mem_context mem_context;
};
-struct psp_hdcp_context {
- bool hdcp_initialized;
- uint32_t session_id;
- struct amdgpu_bo *hdcp_shared_bo;
- uint64_t hdcp_shared_mc_addr;
- void *hdcp_shared_buf;
- struct mutex mutex;
-};
-
-struct psp_dtm_context {
- bool dtm_initialized;
- uint32_t session_id;
- struct amdgpu_bo *dtm_shared_bo;
- uint64_t dtm_shared_mc_addr;
- void *dtm_shared_buf;
- struct mutex mutex;
+struct ta_cp_context {
+ struct ta_context context;
+ struct mutex mutex;
};
-struct psp_rap_context {
- bool rap_initialized;
- uint32_t session_id;
- struct amdgpu_bo *rap_shared_bo;
- uint64_t rap_shared_mc_addr;
- void *rap_shared_buf;
- struct mutex mutex;
+struct psp_xgmi_context {
+ struct ta_context context;
+ struct psp_xgmi_topology_info top_info;
+ bool supports_extended_data;
};
-struct psp_securedisplay_context {
- bool securedisplay_initialized;
- uint32_t session_id;
- struct amdgpu_bo *securedisplay_shared_bo;
- uint64_t securedisplay_shared_mc_addr;
- void *securedisplay_shared_buf;
- struct mutex mutex;
+struct psp_ras_context {
+ struct ta_context context;
+ struct amdgpu_ras *ras;
};
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
@@ -327,11 +300,8 @@ struct psp_context
uint64_t tmr_mc_addr;
/* asd firmware */
- const struct firmware *asd_fw;
- uint32_t asd_fw_version;
- uint32_t asd_feature_version;
- uint32_t asd_ucode_size;
- uint8_t *asd_start_addr;
+ const struct firmware *asd_fw;
+ struct psp_bin_desc asd;
/* toc firmware */
const struct firmware *toc_fw;
@@ -356,36 +326,20 @@ struct psp_context
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
uint32_t ta_fw_version;
- uint32_t ta_xgmi_ucode_version;
- uint32_t ta_xgmi_ucode_size;
- uint8_t *ta_xgmi_start_addr;
- uint32_t ta_ras_ucode_version;
- uint32_t ta_ras_ucode_size;
- uint8_t *ta_ras_start_addr;
-
- uint32_t ta_hdcp_ucode_version;
- uint32_t ta_hdcp_ucode_size;
- uint8_t *ta_hdcp_start_addr;
-
- uint32_t ta_dtm_ucode_version;
- uint32_t ta_dtm_ucode_size;
- uint8_t *ta_dtm_start_addr;
-
- uint32_t ta_rap_ucode_version;
- uint32_t ta_rap_ucode_size;
- uint8_t *ta_rap_start_addr;
-
- uint32_t ta_securedisplay_ucode_version;
- uint32_t ta_securedisplay_ucode_size;
- uint8_t *ta_securedisplay_start_addr;
+ struct psp_bin_desc xgmi;
+ struct psp_bin_desc ras;
+ struct psp_bin_desc hdcp;
+ struct psp_bin_desc dtm;
+ struct psp_bin_desc rap;
+ struct psp_bin_desc securedisplay;
struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
- struct psp_ras_context ras;
- struct psp_hdcp_context hdcp_context;
- struct psp_dtm_context dtm_context;
- struct psp_rap_context rap_context;
- struct psp_securedisplay_context securedisplay_context;
+ struct psp_ras_context ras_context;
+ struct ta_cp_context hdcp_context;
+ struct ta_cp_context dtm_context;
+ struct ta_cp_context rap_context;
+ struct ta_cp_context securedisplay_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
@@ -452,14 +406,15 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size);
-int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
int psp_xgmi_get_topology_info(struct psp_context *psp,
int number_devices,
- struct psp_xgmi_topology_info *topology);
+ struct psp_xgmi_topology_info *topology,
+ bool get_extended_data);
int psp_xgmi_set_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology);
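The net effect of this header refactor is that every TA shares one bookkeeping shape, so teardown paths can be written once. A sketch under assumed visibility (psp_ta_free_shared_buf is static to amdgpu_psp.c):

/* Sketch: one teardown for any CP TA (HDCP/DTM/RAP/securedisplay) now
 * that they all embed the common ta_context. */
static void example_free_cp_ta(struct ta_cp_context *cp_ctx)
{
	if (!cp_ctx->context.initialized)
		return;

	cp_ctx->context.initialized = false;
	psp_ta_free_shared_buf(&cp_ctx->context.mem_context);
}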
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 51909bf8798c..12010c988c8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -76,7 +76,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
dev_info(adev->dev, "RAP L0 validate test success.\n");
} else {
rap_shared_mem = (struct ta_rap_shared_memory *)
- adev->psp.rap_context.rap_shared_buf;
+ adev->psp.rap_context.context.mem_context.shared_buf;
rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
dev_info(adev->dev, "RAP test failed, the output is:\n");
@@ -119,7 +119,7 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
- if (!adev->psp.rap_context.rap_initialized)
+ if (!adev->psp.rap_context.context.initialized)
return;
debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 194f7ccfbf94..96a8fd0ca1df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -64,7 +64,6 @@ const char *ras_block_string[] = {
};
#define ras_err_str(i) (ras_error_string[ffs(i)])
-#define ras_block_str(i) (ras_block_string[i])
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@ -530,7 +529,7 @@ static inline void put_obj(struct ras_manager *obj)
if (obj && (--obj->use == 0))
list_del(&obj->node);
if (obj && (obj->use < 0))
- DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
}
/* make one obj and return it. */
@@ -793,7 +792,6 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
.type = default_ras_type,
.sub_block_index = 0,
};
- strcpy(head.name, ras_block_str(i));
if (bypass) {
/*
* bypass psp. vbios enable ras for us.
@@ -1866,7 +1864,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
struct amdgpu_ras_eeprom_control *control =
- &adev->psp.ras.ras->eeprom_control;
+ &adev->psp.ras_context.ras->eeprom_control;
struct eeprom_table_record *bps;
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 4d9c63f2f377..eae604fd90b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -49,10 +49,14 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__MP0,
AMDGPU_RAS_BLOCK__MP1,
AMDGPU_RAS_BLOCK__FUSE,
+ AMDGPU_RAS_BLOCK__MPIO,
AMDGPU_RAS_BLOCK__LAST
};
+extern const char *ras_block_string[];
+
+#define ras_block_str(i) (ras_block_string[i])
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
@@ -306,7 +310,6 @@ struct ras_common_if {
enum amdgpu_ras_block block;
enum amdgpu_ras_error_type type;
uint32_t sub_block_index;
- /* block name */
char name[32];
};
@@ -418,7 +421,7 @@ struct ras_badpage {
/* interfaces for IP */
struct ras_fs_if {
struct ras_common_if head;
- char sysfs_name[32];
+ const char *sysfs_name;
char debugfs_name[32];
};
@@ -470,8 +473,8 @@ struct ras_debug_if {
* 8: feature disable
*/
-#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras)
-#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con))
+#define amdgpu_ras_get_context(adev) ((adev)->psp.ras_context.ras)
+#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras_context.ras = (ras_con))
/* check if ras is supported on block, say, sdma, gfx */
static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 194590252bb9..dc44c946a244 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -114,21 +114,22 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
struct amdgpu_ras_eeprom_control *control)
{
+ u8 i2c_addr;
+
if (!control)
return false;
- control->i2c_address = 0;
-
- if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)&control->i2c_address))
- {
- if (control->i2c_address == 0xA0)
- control->i2c_address = 0;
- else if (control->i2c_address == 0xA8)
- control->i2c_address = 0x40000;
- else {
- dev_warn(adev->dev, "RAS EEPROM I2C address not supported");
- return false;
- }
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address: strip the
+ * device type identifier to get a 7-bit device address,
+ * then shift it into the 19-bit EEPROM address space. See
+ * the top of amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
return true;
}
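
The normalization above is pure bit manipulation, so it can be checked in isolation. A minimal userspace sketch of the same arithmetic (the function name is ours, not the driver's); note that it reproduces the two special cases the deleted code handled explicitly:

#include <stdint.h>
#include <stdio.h>

/* Wire-format 8-bit address -> 19-bit EEPROM address: drop the device
 * type nibble and the R/W bit, then place the remaining device-select
 * bits in address bits [18:16].
 */
static uint32_t normalize_ras_eeprom_addr(uint8_t wire_addr)
{
	uint8_t i2c_addr = (wire_addr & 0x0F) >> 1;

	return (uint32_t)i2c_addr << 16;
}

int main(void)
{
	/* matches the old special cases: 0xA0 -> 0x00000, 0xA8 -> 0x40000 */
	printf("0xA0 -> 0x%05X\n", normalize_ras_eeprom_addr(0xA0));
	printf("0xA8 -> 0x%05X\n", normalize_ras_eeprom_addr(0xA8));
	return 0;
}
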
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 9c11ced4312c..e713d31619fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -48,6 +48,9 @@
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+/* fence flag bit to indicate the fence is embedded in a job */
+#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
+
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@@ -118,7 +121,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
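
AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT lives in dma_fence::flags above DMA_FENCE_FLAG_USER_BITS, so it travels with the fence itself. A hedged sketch of how such a bit is typically set and tested (helper names are ours; the series presumably uses the bit so the release path can tell a job-embedded fence from a standalone allocation):

#include <linux/dma-fence.h>

/* Hedged sketch: mark/test a fence that is allocated inside an
 * amdgpu_job rather than on its own.
 */
static inline void amdgpu_fence_mark_embedded(struct dma_fence *f)
{
	set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags);
}

static inline bool amdgpu_fence_is_embedded(struct dma_fence *f)
{
	return test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags);
}
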
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index de91d29c9d96..65debb65a5df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -105,7 +105,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->sdma.ras_if->sub_block_index = 0;
- strcpy(adev->sdma.ras_if->name, "sdma");
}
fs_info.head = ih_info->head = *adev->sdma.ras_if;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 123453999093..cc7597a15fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -80,7 +80,7 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id)
{
- *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
+ *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
memset(*cmd, 0, sizeof(struct securedisplay_cmd));
(*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
(*cmd)->cmd_id = command_id;
@@ -170,7 +170,7 @@ void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- if (!adev->psp.securedisplay_context.securedisplay_initialized)
+ if (!adev->psp.securedisplay_context.context.initialized)
return;
debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5fdeceaa979f..abd8469380e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -525,9 +525,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
-FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
+FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e2e2624ac653..7c2538db3cd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -136,21 +136,11 @@ struct psp_firmware_header_v2_0 {
/* version_major=1, version_minor=0 */
struct ta_firmware_header_v1_0 {
struct common_firmware_header header;
- uint32_t ta_xgmi_ucode_version;
- uint32_t ta_xgmi_offset_bytes;
- uint32_t ta_xgmi_size_bytes;
- uint32_t ta_ras_ucode_version;
- uint32_t ta_ras_offset_bytes;
- uint32_t ta_ras_size_bytes;
- uint32_t ta_hdcp_ucode_version;
- uint32_t ta_hdcp_offset_bytes;
- uint32_t ta_hdcp_size_bytes;
- uint32_t ta_dtm_ucode_version;
- uint32_t ta_dtm_offset_bytes;
- uint32_t ta_dtm_size_bytes;
- uint32_t ta_securedisplay_ucode_version;
- uint32_t ta_securedisplay_offset_bytes;
- uint32_t ta_securedisplay_size_bytes;
+ struct psp_fw_legacy_bin_desc xgmi;
+ struct psp_fw_legacy_bin_desc ras;
+ struct psp_fw_legacy_bin_desc hdcp;
+ struct psp_fw_legacy_bin_desc dtm;
+ struct psp_fw_legacy_bin_desc securedisplay;
};
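
The five struct psp_fw_legacy_bin_desc members replace fifteen scalar fields with one three-field descriptor per TA. A hedged reconstruction of that descriptor, inferred from the accessors in the psp_v10_0/psp_v11_0/psp_v12_0 hunks below (fw_version, offset_bytes, size_bytes); the authoritative definition is in amdgpu_ucode.h:

#include <stdint.h>

/* Hedged sketch: inferred layout of the legacy per-TA descriptor. */
struct psp_fw_legacy_bin_desc {
	uint32_t fw_version;   /* per-TA ucode version */
	uint32_t offset_bytes; /* offset of this TA from the first TA binary */
	uint32_t size_bytes;   /* size of this TA binary */
};
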
enum ta_fw_type {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 0c7c56a91b25..a90029ee9733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -41,7 +41,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->umc.ras_if->sub_block_index = 0;
- strcpy(adev->umc.ras_if->name, "umc");
}
ih_info.head = fs_info.head = *adev->umc.ras_if;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 0f576f294d8a..d451c359606a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -326,7 +326,6 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
- cancel_delayed_work_sync(&adev->uvd.idle_work);
drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 1ae7f824adc7..8e8dee9fac9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -218,7 +218,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- cancel_delayed_work_sync(&adev->vce.idle_work);
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 6780df0fb265..008a308a4eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -258,8 +258,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i, j;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 12a7cc2f01cd..ca058fbcccd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -532,9 +532,9 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_fw_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ta_ras_ucode_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.ta_xgmi_ucode_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2a88ed5d983b..6b15cad78de9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -926,7 +926,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
bp.size = amdgpu_vm_bo_size(adev, level);
bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
+ bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -1218,7 +1218,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
if (vm_flush_needed || pasid_mapping_needed) {
- r = amdgpu_fence_emit(ring, &fence, 0);
+ r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
}
@@ -3345,12 +3345,13 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* @adev: amdgpu device pointer
* @pasid: PASID of the VM
* @addr: Address of the fault
+ * @write_fault: true if this is a write fault, false if it is a read fault
*
* Try to gracefully handle a VM fault. Return true if the fault was handled and
* shouldn't be reported any more.
*/
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- uint64_t addr)
+ uint64_t addr, bool write_fault)
{
bool is_compute_context = false;
struct amdgpu_bo *root;
@@ -3375,7 +3376,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
addr /= AMDGPU_GPU_PAGE_SIZE;
if (is_compute_context &&
- !svm_range_restore_pages(adev, pasid, addr)) {
+ !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
amdgpu_bo_unref(&root);
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 80cc9ab2c1d0..85fcfb8c5efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -448,7 +448,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- uint64_t addr);
+ uint64_t addr, bool write_fault);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 2fd77c36a1ff..7b2b0980ec41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -361,7 +361,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Allocate VRAM for the given BO.
*/
@@ -487,7 +487,7 @@ error_sub:
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated VRAM again.
*/
@@ -522,7 +522,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
*
* @adev: amdgpu device pointer
- * @mem: TTM memory object
+ * @res: TTM memory object
* @offset: byte offset from the base of VRAM BO
* @length: number of bytes to export in sg_table
* @dev: the other device
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 258cf86b32f6..978ac927ac11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -32,6 +32,10 @@
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"
+#define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210
+#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
+#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
+
static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
@@ -63,6 +67,33 @@ static const int wafl_pcs_err_status_reg_arct[] = {
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};
+static const int xgmi23_pcs_err_status_reg_aldebaran[] = {
+ smnPCS_XGMI23_PCS_ERROR_STATUS,
+ smnPCS_XGMI23_PCS_ERROR_STATUS + 0x100000,
+ smnPCS_XGMI23_PCS_ERROR_STATUS + 0x200000,
+ smnPCS_XGMI23_PCS_ERROR_STATUS + 0x300000,
+ smnPCS_XGMI23_PCS_ERROR_STATUS + 0x400000,
+ smnPCS_XGMI23_PCS_ERROR_STATUS + 0x500000,
+ smnPCS_XGMI23_PCS_ERROR_STATUS + 0x600000,
+ smnPCS_XGMI23_PCS_ERROR_STATUS + 0x700000
+};
+
+static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
+};
+
+static const int wafl_pcs_err_status_reg_aldebaran[] = {
+ smnPCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
+};
+
static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
{"XGMI PCS DataLossErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
@@ -498,6 +529,32 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
return -EINVAL;
}
+/*
+ * Devices that support extended data require the entire hive to be initialized
+ * with the shared memory buffer flag set.
+ *
+ * Hive locks and conditions apply - see amdgpu_xgmi_add_device
+ */
+static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
+ bool set_extended_data)
+{
+ struct amdgpu_device *tmp_adev;
+ int ret;
+
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Failed to initialize xgmi session for data partition %i\n",
+ set_extended_data);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
@@ -512,7 +569,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.pending_reset &&
amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
- ret = psp_xgmi_initialize(&adev->psp);
+ ret = psp_xgmi_initialize(&adev->psp, false, true);
if (ret) {
dev_err(adev->dev,
"XGMI: Failed to initialize xgmi session\n");
@@ -575,7 +632,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
/* get latest topology info for each device from psp */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
- &tmp_adev->psp.xgmi_context.top_info);
+ &tmp_adev->psp.xgmi_context.top_info, false);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
@@ -585,6 +642,34 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit_unlock;
}
}
+
+ /* get topology again for hives that support extended data */
+ if (adev->psp.xgmi_context.supports_extended_data) {
+ /* initialize the hive to get extended data. */
+ ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
+ if (ret)
+ goto exit_unlock;
+
+ /* get the extended data. */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+ &tmp_adev->psp.xgmi_context.top_info, true);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
+ tmp_adev->gmc.xgmi.hive_id, ret);
+ goto exit_unlock;
+ }
+ }
+
+ /* initialize the hive to get non-extended data for the next round. */
+ ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
+ if (ret)
+ goto exit_unlock;
+ }
}
if (!ret && !adev->gmc.xgmi.pending_reset)
@@ -663,7 +748,6 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gmc.xgmi.ras_if->sub_block_index = 0;
- strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
}
ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
@@ -718,6 +802,17 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
pcs_clear_status(adev,
xgmi_pcs_err_status_reg_vg20[i]);
break;
+ case CHIP_ALDEBARAN:
+ for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
+ pcs_clear_status(adev,
+ xgmi23_pcs_err_status_reg_aldebaran[i]);
+ for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
+ pcs_clear_status(adev,
+ xgmi23_pcs_err_status_reg_aldebaran[i]);
+ for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
+ pcs_clear_status(adev,
+ walf_pcs_err_status_reg_aldebaran[i]);
+ break;
default:
break;
}
@@ -795,7 +890,6 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
}
break;
case CHIP_VEGA20:
- default:
/* check xgmi pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
@@ -811,6 +905,32 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
data, &ue_cnt, &ce_cnt, false);
}
break;
+ case CHIP_ALDEBARAN:
+ /* check xgmi23 pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) {
+ data = RREG32_PCIE(xgmi23_pcs_err_status_reg_aldebaran[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check xgmi3x16 pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
+ data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
+ data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ default:
+ dev_warn(adev->dev, "XGMI RAS error query not supported");
+ break;
}
adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 159a2a4385a1..afad094f84c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -851,7 +851,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
pll->reference_div = amdgpu_crtc->pll_reference_div;
pll->post_div = amdgpu_crtc->pll_post_div;
- amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
+ amdgpu_pll_compute(adev, pll, amdgpu_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 1769c4cba2ad..00a2b36a24b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -85,7 +85,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
};
-/**
+/*
* This shader is used to clear VGPRS and LDS, and also write the input
 * pattern into the write back buffer, which will be used by the driver to
* check whether all SIMDs have been covered.
@@ -206,7 +206,7 @@ const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
-/**
+/*
* The below shaders are used to clear SGPRS, and also write the input
 * pattern into the write back buffer. The first two dispatches should be
 * scheduled simultaneously to make sure that all SGPRS could be
@@ -302,7 +302,7 @@ const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
-/**
+/*
 * This shader is used to clear the uninitialized sgprs after the above
 * two dispatches; because of a hardware limitation, dispatch 0 couldn't clear
 * the top hole sgprs. Therefore 4 waves per SIMD are needed to cover these sgprs
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index 8fca72ebd11c..497b86c376c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -75,9 +75,8 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
max_physical_node_id = 7;
break;
case CHIP_ALDEBARAN:
- /* just using duplicates for Aldebaran support, revisit later */
- max_num_physical_nodes = 8;
- max_physical_node_id = 7;
+ max_num_physical_nodes = 16;
+ max_physical_node_id = 15;
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 24b781e90bef..41c3a0d70b7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -93,6 +93,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
bool retry_fault = !!(entry->src_data[1] & 0x80);
+ bool write_fault = !!(entry->src_data[1] & 0x20);
struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
struct amdgpu_task_info task_info;
uint32_t status = 0;
@@ -121,7 +122,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
return 1;
}
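
Both GMC interrupt handlers in this series (gmc_v10_0 here, gmc_v9_0 below) now decode two attribute bits from the same IV-ring dword. The decode in isolation, as a standalone sketch; the bit meanings are taken directly from the two extractions in these hunks:

#include <stdbool.h>
#include <stdint.h>

/* src_data[1] attribute bits per this patch: bit 7 marks a retryable
 * fault, bit 5 marks a write fault.
 */
static void decode_vm_fault_bits(uint32_t src_data1,
				 bool *retry_fault, bool *write_fault)
{
	*retry_fault = !!(src_data1 & 0x80);
	*write_fault = !!(src_data1 & 0x20);
}
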
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 097230b5e946..d90c16a6b2b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -55,6 +55,7 @@
#include "umc_v6_0.h"
#include "umc_v6_7.h"
#include "hdp_v4_0.h"
+#include "mca_v3_0.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
@@ -506,6 +507,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
bool retry_fault = !!(entry->src_data[1] & 0x80);
+ bool write_fault = !!(entry->src_data[1] & 0x20);
uint32_t status = 0, cid = 0, rw = 0;
struct amdgpu_task_info task_info;
struct amdgpu_vmhub *hub;
@@ -536,7 +538,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
return 1;
}
@@ -1229,6 +1231,18 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
}
+static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_ALDEBARAN:
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->mca.funcs = &mca_v3_0_funcs;
+ break;
+ default:
+ break;
+ }
+}
+
static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1250,6 +1264,7 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_mmhub_ras_funcs(adev);
gmc_v9_0_set_gfxhub_funcs(adev);
gmc_v9_0_set_hdp_ras_funcs(adev);
+ gmc_v9_0_set_mca_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -1461,6 +1476,8 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gfxhub.funcs->init(adev);
adev->mmhub.funcs->init(adev);
+ if (adev->mca.funcs)
+ adev->mca.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
new file mode 100644
index 000000000000..058b65730a84
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+#include "amdgpu_mca.h"
+
+#define smnMCMP0_STATUST0 0x03830408
+#define smnMCMP1_STATUST0 0x03b30408
+#define smnMCMPIO_STATUST0 0x0c930408
+
+
+static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_mca_query_ras_error_count(adev,
+ smnMCMP0_STATUST0,
+ ras_error_status);
+}
+
+static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev)
+{
+ return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
+}
+
+static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
+}
+
+const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
+ .ras_late_init = mca_v3_0_mp0_ras_late_init,
+ .ras_fini = mca_v3_0_mp0_ras_fini,
+ .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
+ .query_ras_error_address = NULL,
+ .ras_block = AMDGPU_RAS_BLOCK__MP0,
+ .sysfs_name = "mp0_err_count",
+};
+
+static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_mca_query_ras_error_count(adev,
+ smnMCMP1_STATUST0,
+ ras_error_status);
+}
+
+static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev)
+{
+ return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1);
+}
+
+static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
+}
+
+const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
+ .ras_late_init = mca_v3_0_mp1_ras_late_init,
+ .ras_fini = mca_v3_0_mp1_ras_fini,
+ .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
+ .query_ras_error_address = NULL,
+ .ras_block = AMDGPU_RAS_BLOCK__MP1,
+ .sysfs_name = "mp1_err_count",
+};
+
+static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_mca_query_ras_error_count(adev,
+ smnMCMPIO_STATUST0,
+ ras_error_status);
+}
+
+static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev)
+{
+ return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio);
+}
+
+static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
+}
+
+const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
+ .ras_late_init = mca_v3_0_mpio_ras_late_init,
+ .ras_fini = mca_v3_0_mpio_ras_fini,
+ .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
+ .query_ras_error_address = NULL,
+ .ras_block = AMDGPU_RAS_BLOCK__MPIO,
+ .sysfs_name = "mpio_err_count",
+};
+
+
+static void mca_v3_0_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_mca *mca = &adev->mca;
+
+ mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs;
+ mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs;
+ mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs;
+}
+
+const struct amdgpu_mca_funcs mca_v3_0_funcs = {
+ .init = mca_v3_0_init,
+};
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
new file mode 100644
index 000000000000..b899b86194c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __MCA_V3_0_H__
+#define __MCA_V3_0_H__
+
+extern const struct amdgpu_mca_funcs mca_v3_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
index 20958639b601..2cdab8062c86 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
@@ -24,9 +24,7 @@
#ifndef __MMSCH_V1_0_H__
#define __MMSCH_V1_0_H__
-#define MMSCH_VERSION_MAJOR 1
-#define MMSCH_VERSION_MINOR 0
-#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+#define MMSCH_VERSION 0x1
enum mmsch_v1_0_command_type {
MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index ff2307d7ee0f..23b066bcffb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -258,6 +258,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
+ xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 50572635d0f8..bd3b23171579 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -37,6 +37,7 @@ enum idh_request {
IDH_REQ_GPU_RESET_ACCESS,
IDH_LOG_VF_ERROR = 200,
+ IDH_READY_TO_RESET = 201,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 9f7aac435d69..a35e6d87e537 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -96,7 +96,11 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
- int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+ int r;
+ uint64_t timeout, now;
+
+ now = (uint64_t)ktime_to_ms(ktime_get());
+ timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
@@ -104,8 +108,8 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
return 0;
msleep(10);
- timeout -= 10;
- } while (timeout > 1);
+ now = (uint64_t)ktime_to_ms(ktime_get());
+ } while (timeout > now);
return -ETIME;
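
The polling rework above replaces a countdown that subtracted the nominal sleep length with an absolute deadline, so oversleeping in msleep() no longer stretches the effective timeout. The pattern in a hedged, generic form (names are ours):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

/* Hedged sketch: compute the expiry once, then compare against the
 * current time each iteration instead of decrementing a budget.
 */
static int poll_with_deadline(bool (*done)(void *), void *arg,
			      unsigned int timeout_ms)
{
	u64 deadline = (u64)ktime_to_ms(ktime_get()) + timeout_ms;

	do {
		if (done(arg))
			return 0;
		msleep(10);
	} while ((u64)ktime_to_ms(ktime_get()) < deadline);

	return -ETIME;
}
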
@@ -149,9 +153,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
- int r;
+ int r, retry = 1;
enum idh_event event = -1;
+send_request:
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
switch (req) {
@@ -170,6 +175,9 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
if (event != -1) {
r = xgpu_nv_poll_msg(adev, event);
if (r) {
+ if (retry++ < 2)
+ goto send_request;
+
if (req != IDH_REQ_GPU_INIT_DATA) {
pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return r;
@@ -279,6 +287,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
+ xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 9f5808616174..73887b0aa1d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -37,7 +37,8 @@ enum idh_request {
IDH_REQ_GPU_RESET_ACCESS,
IDH_REQ_GPU_INIT_DATA,
- IDH_LOG_VF_ERROR = 200,
+ IDH_LOG_VF_ERROR = 200,
+ IDH_READY_TO_RESET = 201,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index cef929746739..f50045cebd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -85,6 +85,14 @@
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
+#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x00fe
+#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
+#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
+#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+
+#define mmBIF_INTR_CNTL_ALDE 0x0101
+#define mmBIF_INTR_CNTL_ALDE_BASE_IDX 2
+
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -346,14 +354,21 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
struct ras_err_data err_data = {0, 0, 0, NULL};
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
+ else
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
/* driver has to clear the interrupt status when bif ring is disabled */
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL,
RAS_CNTLR_INTERRUPT_CLEAR, 1);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
if (!ras->disable_ras_err_cnt_harvest) {
/*
@@ -372,13 +387,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
"errors detected in %s block, "
"no user action is needed.\n",
obj->err_data.ce_count,
- adev->nbio.ras_if->name);
+ ras_block_str(adev->nbio.ras_if->block));
if (err_data.ue_count)
dev_info(adev->dev, "%ld uncorrectable hardware "
"errors detected in %s block\n",
obj->err_data.ue_count,
- adev->nbio.ras_if->name);
+ ras_block_str(adev->nbio.ras_if->block));
}
dev_info(adev->dev, "RAS controller interrupt triggered "
@@ -395,14 +410,22 @@ static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_d
{
uint32_t bif_doorbell_intr_cntl;
- bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
+ else
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
/* driver has to clear the interrupt status when bif ring is disabled */
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL,
RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
amdgpu_ras_global_ras_isr(adev);
}
@@ -420,14 +443,23 @@ static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
}
return 0;
@@ -456,14 +488,22 @@ static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *ade
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
}
return 0;
@@ -572,7 +612,11 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
bool enable)
{
- WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE,
+ DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
+ else
+ WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
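
Every Aldebaran special case in this file varies only in which BIF register offset it touches, while the field masks are shared. A hedged sketch of how the repetition could be folded into one selector (the helper name is ours; the WREG32_FIELD15 sites would still need the register name rather than an offset):

/* Hedged sketch: pick the Aldebaran doorbell-interrupt control offset
 * when applicable, the common one otherwise.
 */
static uint32_t nbio_v7_4_doorbell_int_cntl_offset(struct amdgpu_device *adev)
{
	return (adev->asic_type == CHIP_ALDEBARAN) ?
		SOC15_REG_OFFSET(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE) :
		SOC15_REG_OFFSET(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
}
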
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 4b1cc5e9ee92..5872d68ed13d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -84,29 +84,29 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
- adev->psp.ta_hdcp_ucode_version =
- le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
- adev->psp.ta_hdcp_ucode_size =
- le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
- adev->psp.ta_hdcp_start_addr =
+ adev->psp.hdcp.feature_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.ta_dtm_ucode_version =
- le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
- adev->psp.ta_dtm_ucode_size =
- le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
- adev->psp.ta_dtm_start_addr =
- (uint8_t *)adev->psp.ta_hdcp_start_addr +
- le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
-
- adev->psp.ta_securedisplay_ucode_version =
- le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
- adev->psp.ta_securedisplay_ucode_size =
- le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
- adev->psp.ta_securedisplay_start_addr =
- (uint8_t *)adev->psp.ta_hdcp_start_addr +
- le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
+ adev->psp.dtm.feature_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm.start_addr =
+ (uint8_t *)adev->psp.hdcp.start_addr +
+ le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ adev->psp.securedisplay.feature_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay.start_addr =
+ (uint8_t *)adev->psp.hdcp.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
}
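
The hdcp, dtm, and securedisplay blocks above all perform the same three assignments, differing only in which legacy descriptor they read. A hedged sketch of a common helper (the name is ours, and hdcp's offset_bytes is assumed to be 0 since its start address is computed directly from the header):

/* Hedged sketch: fill one psp_bin_desc from a legacy TA descriptor.
 * 'base' is the first TA binary, i.e. the firmware image plus
 * header.ucode_array_offset_bytes.
 */
static void psp_fill_ta_bin_desc(struct psp_bin_desc *desc,
				 const struct psp_fw_legacy_bin_desc *hdr,
				 uint8_t *base)
{
	desc->feature_version = le32_to_cpu(hdr->fw_version);
	desc->size_bytes = le32_to_cpu(hdr->size_bytes);
	desc->start_addr = base + le32_to_cpu(hdr->offset_bytes);
}
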
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 8862684f8b43..29bf9f09944b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -151,15 +151,15 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
- adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
- adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+ adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
+ adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
+ adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
- adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
- adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
- le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
+ adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
+ adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
+ adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
+ le32_to_cpu(ta_hdr->ras.offset_bytes);
}
break;
case CHIP_NAVI10:
@@ -186,17 +186,17 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
- adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
- adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
+ adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
- adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
- adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
- le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
+ le32_to_cpu(ta_hdr->dtm.offset_bytes);
}
break;
case CHIP_SIENNA_CICHLID:
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 0c908d4566e8..cc649406234b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -84,23 +84,23 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
- adev->psp.ta_hdcp_ucode_version =
- le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
- adev->psp.ta_hdcp_ucode_size =
- le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
- adev->psp.ta_hdcp_start_addr =
+ adev->psp.hdcp.feature_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ta_dtm_ucode_version =
- le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
- adev->psp.ta_dtm_ucode_size =
- le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
- adev->psp.ta_dtm_start_addr =
- (uint8_t *)adev->psp.ta_hdcp_start_addr +
- le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ adev->psp.dtm.feature_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm.start_addr =
+ (uint8_t *)adev->psp.hdcp.start_addr +
+ le32_to_cpu(ta_hdr->dtm.offset_bytes);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f7b56a746c15..0fc97c364fd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1353,8 +1353,6 @@ static int soc15_common_early_init(void *handle)
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
- AMD_CG_SUPPORT_GFX_CGCG |
- AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
index cce7127afeaa..da815a93d46e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
@@ -134,7 +134,8 @@ struct ta_xgmi_shared_memory {
uint32_t cmd_id;
uint32_t resp_id;
enum ta_xgmi_status xgmi_status;
- uint32_t reserved;
+ uint8_t flag_extend_link_record;
+ uint8_t reserved0[3];
union ta_xgmi_cmd_input xgmi_in_message;
union ta_xgmi_cmd_output xgmi_out_message;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 6c0e91495365..7232241e3bfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -698,6 +698,30 @@ static int uvd_v3_1_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, false);
+ } else {
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ }
+
if (RREG32(mmUVD_STATUS) != 0)
uvd_v3_1_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index a301518e4957..52d6de969f46 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -212,6 +212,30 @@ static int uvd_v4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, false);
+ } else {
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ }
+
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a4d5bd21c83c..db6d06758e4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -210,6 +210,30 @@ static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, false);
+ } else {
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ }
+
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
@@ -224,7 +248,6 @@ static int uvd_v5_0_suspend(void *handle)
r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
- uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
return amdgpu_uvd_suspend(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index cf3803f8f075..bc571833632e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -543,6 +543,30 @@ static int uvd_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, false);
+ } else {
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ }
+
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 939bcfa2a4ec..b6e82d75561f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -606,6 +606,30 @@ static int uvd_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, false);
+ } else {
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ }
+
if (!amdgpu_sriov_vf(adev))
uvd_v7_0_stop(adev);
else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c7d28c169be5..b70c17f0c52e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -477,6 +477,31 @@ static int vce_v2_0_hw_init(void *handle)
static int vce_v2_0_hw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3b82fb289ef6..9de66893ccd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -490,6 +490,29 @@ static int vce_v3_0_hw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
r = vce_v3_0_wait_for_idle(handle);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 90910d19db12..fec902b800c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -542,6 +542,29 @@ static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
vce_v4_0_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index a972ef5eae68..f8fce9d05f50 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -211,6 +211,15 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
WARN_ON(!old);
}
+static void program_trap_handler_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ if (dqm->dev->kfd2kgd->program_trap_handler_settings)
+ dqm->dev->kfd2kgd->program_trap_handler_settings(
+ dqm->dev->kgd, qpd->vmid,
+ qpd->tba_addr, qpd->tma_addr);
+}
+
static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
@@ -241,6 +250,10 @@ static int allocate_vmid(struct device_queue_manager *dqm,
program_sh_mem_settings(dqm, qpd);
+ if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 &&
+ dqm->dev->cwsr_enabled)
+ program_trap_handler_settings(dqm, qpd);
+
/* qpd->page_table_base is set earlier when register_process()
* is called, i.e. when the first queue is created.
*/
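
The KFD changes in this file pair two ideas: a new optional kfd2kgd hook that is called only when the backend implements it, and (in the next two hunks) preemption that saves wavefront state instead of draining once CWSR is enabled. A compilable model, with illustrative types, names, and enum values that are not the KFD definitions:

/* Optional-hook guard plus CWSR-aware preempt selection, modeled. */
#include <stdbool.h>
#include <stdio.h>

enum preempt_type { PREEMPT_WAVEFRONT_DRAIN, PREEMPT_WAVEFRONT_SAVE };

struct kfd2kgd { void (*program_trap_handler)(unsigned long tba, unsigned long tma); };
struct dev { const struct kfd2kgd *ops; bool cwsr_enabled; };

static void program_trap_handler_settings(struct dev *d,
                                          unsigned long tba, unsigned long tma)
{
        if (d->ops->program_trap_handler)       /* hook is optional */
                d->ops->program_trap_handler(tba, tma);
}

static enum preempt_type preempt_for(const struct dev *d)
{
        /* CWSR: preempt by saving wavefront state instead of draining */
        return d->cwsr_enabled ? PREEMPT_WAVEFRONT_SAVE
                               : PREEMPT_WAVEFRONT_DRAIN;
}

int main(void)
{
        const struct kfd2kgd ops = { 0 };       /* backend without the hook */
        struct dev d = { &ops, true };

        program_trap_handler_settings(&d, 0x1000, 0x2000);  /* safe no-op */
        printf("preempt=%d\n", preempt_for(&d));
        return 0;
}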
@@ -582,7 +595,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
- KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ (dqm->dev->cwsr_enabled ?
+ KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
pr_err("destroy mqd failed\n");
@@ -675,7 +690,9 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
continue;
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
- KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ (dqm->dev->cwsr_enabled ?
+ KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval && !ret)
/* Return the first error, but keep going to
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 88813dad731f..c021519af810 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
uint32_t *se_mask)
{
struct kfd_cu_info cu_info;
- uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
- int i, se, sh, cu = 0;
-
+ uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
+ int i, se, sh, cu;
amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;
+ /* Exceeding these bounds corrupts the stack and indicates a coding error.
+ * Returning with no CUs enabled will hang the queue, which should be
+ * attention-grabbing.
+ */
+ if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
+ pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
+ return;
+ }
+ if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
+ pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
+ cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
+ return;
+ }
+ /* Count active CUs per SH.
+ *
+ * Some CUs in an SH may be disabled. HW expects disabled CUs to be
+ * represented in the high bits of each SH's enable mask (the upper and lower
+ * 16 bits of se_mask) and will take care of the actual distribution of
+ * disabled CUs within each SH automatically.
+ * Each half of se_mask must only have bits set in positions 0 to cu_per_sh[se][sh]-1.
+ *
+ * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
+ */
for (se = 0; se < cu_info.num_shader_engines; se++)
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
- cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
-
- /* Symmetrically map cu_mask to all SEs:
- * cu_mask[0] bit0 -> se_mask[0] bit0;
- * cu_mask[0] bit1 -> se_mask[1] bit0;
- * ... (if # SE is 4)
- * cu_mask[0] bit4 -> se_mask[0] bit1;
+ cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
+
+ /* Symmetrically map cu_mask to all SEs & SHs:
+ * se_mask programs up to 2 SH in the upper and lower 16 bits.
+ *
+ * Examples
+ * Assuming 1 SH/SE, 4 SEs:
+ * cu_mask[0] bit0 -> se_mask[0] bit0
+ * cu_mask[0] bit1 -> se_mask[1] bit0
+ * ...
+ * cu_mask[0] bit4 -> se_mask[0] bit1
+ * ...
+ *
+ * Assuming 2 SH/SE, 4 SEs
+ * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
+ * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
+ * ...
+ * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
+ * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
+ * ...
+ * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
* ...
+ *
+ * First ensure all CUs are disabled, then enable user specified CUs.
*/
- se = 0;
- for (i = 0; i < cu_mask_count; i++) {
- if (cu_mask[i / 32] & (1 << (i % 32)))
- se_mask[se] |= 1 << cu;
-
- do {
- se++;
- if (se == cu_info.num_shader_engines) {
- se = 0;
- cu++;
+ for (i = 0; i < cu_info.num_shader_engines; i++)
+ se_mask[i] = 0;
+
+ i = 0;
+ for (cu = 0; cu < 16; cu++) {
+ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
+ for (se = 0; se < cu_info.num_shader_engines; se++) {
+ if (cu_per_sh[se][sh] > cu) {
+ if (cu_mask[i / 32] & (1 << (i % 32)))
+ se_mask[se] |= 1 << (cu + sh * 16);
+ i++;
+ if (i == cu_mask_count)
+ return;
+ }
}
- } while (cu >= cu_per_se[se] && cu < 32);
+ }
}
}
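
The new distribution loop above is easier to see running on a small configuration. The demo below replays the same round-robin (SEs innermost, then SHs, then CU slots, with SH1 in the upper 16 bits of each se_mask word) for a made-up 2 SE / 2 SH / 4 CU-per-SH chip; the config values are illustrative only.

/* Standalone demo of the symmetric CU-mask distribution. */
#include <stdint.h>
#include <stdio.h>

#define NUM_SE 2
#define NUM_SH 2

int main(void)
{
        uint32_t cu_per_sh[NUM_SE][NUM_SH] = { {4, 4}, {4, 4} };
        uint32_t cu_mask = 0x3f;         /* user enables 6 CUs */
        uint32_t se_mask[NUM_SE] = {0};
        int i = 0, cu, sh, se, count = 6;

        for (cu = 0; cu < 16; cu++)
                for (sh = 0; sh < NUM_SH; sh++)
                        for (se = 0; se < NUM_SE; se++) {
                                if (cu_per_sh[se][sh] <= (uint32_t)cu)
                                        continue;        /* CU slot absent here */
                                if (cu_mask & (1u << i))
                                        se_mask[se] |= 1u << (cu + sh * 16);
                                if (++i == count)
                                        goto done;
                        }
done:
        for (se = 0; se < NUM_SE; se++)
                printf("se_mask[%d] = 0x%08x\n", se, se_mask[se]);
        /* Prints 0x00010003 twice: CU0 and CU1 on each SE's SH0 (bits 0,1)
         * plus CU0 on each SE's SH1 (bit 16), matching the comment's
         * cu_mask bit4 -> se_mask[0] bit16 style examples. */
        return 0;
}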
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index b5e2ea7550d4..6e6918ccedfd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -27,6 +27,7 @@
#include "kfd_priv.h"
#define KFD_MAX_NUM_SE 8
+#define KFD_MAX_NUM_SH_PER_SE 2
/**
* struct mqd_manager
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7df69b77bc5c..9fc8021bb0ab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -120,6 +120,7 @@ static void svm_range_remove_notifier(struct svm_range *prange)
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
+ unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns, uint32_t gpuidx)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
@@ -136,7 +137,8 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
prange->dma_addr[gpuidx] = addr;
}
- for (i = 0; i < prange->npages; i++) {
+ addr += offset;
+ for (i = 0; i < npages; i++) {
if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
"leaking dma mapping\n"))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
@@ -167,6 +169,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
+ unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns)
{
struct kfd_process *p;
@@ -187,7 +190,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
}
adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
+ r = svm_range_dma_map_dev(adev, prange, offset, npages,
+ hmm_pfns, gpuidx);
if (r)
break;
}
@@ -1088,11 +1092,6 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
-
- pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
- prange->svms, prange->start, prange->last,
- (domain == SVM_RANGE_VRAM_DOMAIN) ? 1:0, pte_flags, mapping_flags);
-
return pte_flags;
}
@@ -1156,7 +1155,8 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
static int
svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct svm_range *prange, dma_addr_t *dma_addr,
+ struct svm_range *prange, unsigned long offset,
+ unsigned long npages, bool readonly, dma_addr_t *dma_addr,
struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
struct amdgpu_bo_va bo_va;
@@ -1167,14 +1167,15 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int r = 0;
int64_t i;
- pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
- prange->last);
+ last_start = prange->start + offset;
+
+ pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
+ last_start, last_start + npages - 1, readonly);
if (prange->svm_bo && prange->ttm_res)
bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
- last_start = prange->start;
- for (i = 0; i < prange->npages; i++) {
+ for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
if ((prange->start + i) < prange->last &&
@@ -1183,13 +1184,21 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
+
pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
- r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
- last_start,
+ if (readonly)
+ pte_flags &= ~AMDGPU_PTE_WRITEABLE;
+
+ pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
+ prange->svms, last_start, prange->start + i,
+ (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
+ pte_flags);
+
+ r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
+ NULL, last_start,
prange->start + i, pte_flags,
last_start - prange->start,
- NULL,
- dma_addr,
+ NULL, dma_addr,
&vm->last_update,
&table_freed);
if (r) {
@@ -1220,8 +1229,10 @@ out:
return r;
}
-static int svm_range_map_to_gpus(struct svm_range *prange,
- unsigned long *bitmap, bool wait)
+static int
+svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
+ unsigned long npages, bool readonly,
+ unsigned long *bitmap, bool wait)
{
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev;
@@ -1257,7 +1268,8 @@ static int svm_range_map_to_gpus(struct svm_range *prange,
}
r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
- prange, prange->dma_addr[gpuidx],
+ prange, offset, npages, readonly,
+ prange->dma_addr[gpuidx],
bo_adev, wait ? &fence : NULL);
if (r)
break;
@@ -1390,7 +1402,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
int32_t gpuidx, bool intr, bool wait)
{
struct svm_validate_context ctx;
- struct hmm_range *hmm_range;
+ unsigned long start, end, addr;
struct kfd_process *p;
void *owner;
int32_t idx;
@@ -1448,40 +1460,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
break;
}
}
- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
- prange->start << PAGE_SHIFT,
- prange->npages, &hmm_range,
- false, true, owner);
- if (r) {
- pr_debug("failed %d to get svm range pages\n", r);
- goto unreserve_out;
- }
- r = svm_range_dma_map(prange, ctx.bitmap,
- hmm_range->hmm_pfns);
- if (r) {
- pr_debug("failed %d to dma map range\n", r);
- goto unreserve_out;
- }
+ start = prange->start << PAGE_SHIFT;
+ end = (prange->last + 1) << PAGE_SHIFT;
+ for (addr = start; addr < end && !r; ) {
+ struct hmm_range *hmm_range;
+ struct vm_area_struct *vma;
+ unsigned long next;
+ unsigned long offset;
+ unsigned long npages;
+ bool readonly;
- prange->validated_once = true;
+ vma = find_vma(mm, addr);
+ if (!vma || addr < vma->vm_start) {
+ r = -EFAULT;
+ goto unreserve_out;
+ }
+ readonly = !(vma->vm_flags & VM_WRITE);
- svm_range_lock(prange);
- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
- pr_debug("hmm update the range, need validate again\n");
- r = -EAGAIN;
- goto unlock_out;
- }
- if (!list_empty(&prange->child_list)) {
- pr_debug("range split by unmap in parallel, validate again\n");
- r = -EAGAIN;
- goto unlock_out;
- }
+ next = min(vma->vm_end, end);
+ npages = (next - addr) >> PAGE_SHIFT;
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+ addr, npages, &hmm_range,
+ readonly, true, owner);
+ if (r) {
+ pr_debug("failed %d to get svm range pages\n", r);
+ goto unreserve_out;
+ }
- r = svm_range_map_to_gpus(prange, ctx.bitmap, wait);
+ offset = (addr - start) >> PAGE_SHIFT;
+ r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
+ hmm_range->hmm_pfns);
+ if (r) {
+ pr_debug("failed %d to dma map range\n", r);
+ goto unreserve_out;
+ }
+
+ svm_range_lock(prange);
+ if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
+ pr_debug("hmm update the range, need validate again\n");
+ r = -EAGAIN;
+ goto unlock_out;
+ }
+ if (!list_empty(&prange->child_list)) {
+ pr_debug("range split by unmap in parallel, validate again\n");
+ r = -EAGAIN;
+ goto unlock_out;
+ }
+
+ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+ ctx.bitmap, wait);
unlock_out:
- svm_range_unlock(prange);
+ svm_range_unlock(prange);
+
+ addr = next;
+ }
+
+ if (addr == end)
+ prange->validated_once = true;
+
unreserve_out:
svm_range_unreserve_bos(&ctx);
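
The rewritten loop above walks the range one VMA at a time instead of assuming a single mapping. A userspace-flavored sketch of just that chunking follows; find_vma(), PAGE_SHIFT, and the VMA table are modeled here, not the kernel API.

/* Split [start, end) at VMA boundaries, deriving per-chunk npages and
 * the offset into the range's dma_addr array, and noting read-only VMAs. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct vma { unsigned long vm_start, vm_end; bool writable; };

/* pretend address space: two adjacent VMAs */
static const struct vma vmas[] = {
        { 0x1000, 0x5000, true  },
        { 0x5000, 0x8000, false },
};

static const struct vma *find_vma(unsigned long addr)
{
        for (unsigned i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
                if (addr < vmas[i].vm_end)
                        return &vmas[i];
        return NULL;
}

int main(void)
{
        unsigned long start = 0x1000, end = 0x8000, addr;

        for (addr = start; addr < end; ) {
                const struct vma *vma = find_vma(addr);

                if (!vma || addr < vma->vm_start)
                        return 1;       /* hole: -EFAULT in the driver */

                unsigned long next = vma->vm_end < end ? vma->vm_end : end;
                unsigned long npages = (next - addr) >> PAGE_SHIFT;
                unsigned long offset = (addr - start) >> PAGE_SHIFT;

                printf("chunk at 0x%lx: offset %lu, %lu pages, %s\n",
                       addr, offset, npages, vma->writable ? "rw" : "ro");
                addr = next;    /* get pages, dma-map, map to GPUs here */
        }
        return 0;
}

Note how validated_once is only set when the walk reaches end, i.e. when every chunk succeeded.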
@@ -2400,9 +2438,29 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
WRITE_ONCE(pdd->faults, pdd->faults + 1);
}
+static bool
+svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
+{
+ unsigned long requested = VM_READ;
+ struct vm_area_struct *vma;
+
+ if (write_fault)
+ requested |= VM_WRITE;
+
+ vma = find_vma(mm, addr << PAGE_SHIFT);
+ if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
+ pr_debug("address 0x%llx VMA is removed\n", addr);
+ return true;
+ }
+
+ pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
+ vma->vm_flags);
+ return (vma->vm_flags & requested) == requested;
+}
+
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
- uint64_t addr)
+ uint64_t addr, bool write_fault)
{
struct mm_struct *mm = NULL;
struct svm_range_list *svms;
@@ -2426,7 +2484,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
- return -EFAULT;
+ r = -EFAULT;
+ goto out;
}
svms = &p->svms;
@@ -2484,6 +2543,13 @@ retry_write_locked:
goto out_unlock_range;
}
+ if (!svm_fault_allowed(mm, addr, write_fault)) {
+ pr_debug("fault addr 0x%llx no %s permission\n", addr,
+ write_fault ? "write" : "read");
+ r = -EPERM;
+ goto out_unlock_range;
+ }
+
best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
if (best_loc == -1) {
pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
@@ -2675,22 +2741,26 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
return 0;
}
-/* svm_range_best_prefetch_location - decide the best prefetch location
+/**
+ * svm_range_best_prefetch_location - decide the best prefetch location
* @prange: svm range structure
*
* For xnack off:
- * If range map to single GPU, the best acutal location is prefetch loc, which
+ * If the range maps to a single GPU, the best prefetch location is prefetch_loc, which
* can be CPU or GPU.
*
- * If range map to multiple GPUs, only if mGPU connection on xgmi same hive,
- * the best actual location could be prefetch_loc GPU. If mGPU connection on
- * PCIe, the best actual location is always CPU, because GPU cannot access vram
- * of other GPUs, assuming PCIe small bar (large bar support is not upstream).
+ * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
+ * location is the prefetch_loc GPU only if the mGPUs are on the same XGMI
+ * hive; otherwise it is always CPU, because a GPU cannot have a coherent
+ * mapping of another GPU's VRAM even over a large-BAR PCIe connection.
*
* For xnack on:
- * The best actual location is prefetch location. If mGPU connection on xgmi
- * same hive, range map to multiple GPUs. Otherwise, the range only map to
- * actual location GPU. Other GPU access vm fault will trigger migration.
+ * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
+ * prefetch_loc; access from other GPUs will generate a vm fault and trigger migration.
+ *
+ * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
+ * prefetch_loc GPU only if the mGPUs are on the same XGMI hive; otherwise the
+ * best prefetch location is always CPU.
*
* Context: Process context
*
@@ -2710,11 +2780,6 @@ svm_range_best_prefetch_location(struct svm_range *prange)
p = container_of(prange->svms, struct kfd_process, svms);
- /* xnack on */
- if (p->xnack_enabled)
- goto out;
-
- /* xnack off */
if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
goto out;
@@ -2724,8 +2789,12 @@ svm_range_best_prefetch_location(struct svm_range *prange)
best_loc = 0;
goto out;
}
- bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
- MAX_GPU_INSTANCE);
+
+ if (p->xnack_enabled)
+ bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
+ else
+ bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
+ MAX_GPU_INSTANCE);
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
@@ -3027,6 +3096,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);
+ /* Flush pending deferred work to avoid racing with deferred actions from
+ * previous memory map changes (e.g. munmap). Concurrent memory map changes
+ * can still race with get_attr because we don't hold the mmap lock. But that
+ * would be a race condition in the application anyway, and undefined
+ * behaviour is acceptable in that case.
+ */
+ flush_work(&p->svms.deferred_list_work);
+
mmap_read_lock(mm);
if (!svm_range_is_valid(mm, start, size)) {
pr_debug("invalid range\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 3fc1fd8b4fbc..c6ec55354c7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -175,7 +175,7 @@ int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
unsigned long addr, struct svm_range *parent,
struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev,
- unsigned int pasid, uint64_t addr);
+ unsigned int pasid, uint64_t addr, bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
struct svm_range *prange, struct mm_struct *mm,
@@ -209,7 +209,8 @@ static inline void svm_range_list_fini(struct kfd_process *p)
}
static inline int svm_range_restore_pages(struct amdgpu_device *adev,
- unsigned int pasid, uint64_t addr)
+ unsigned int pasid, uint64_t addr,
+ bool write_fault)
{
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3e28f17c84fa..9b1fc54555ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1044,10 +1044,10 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
-static void event_mall_stutter(struct work_struct *work)
+static void vblank_control_worker(struct work_struct *work)
{
-
- struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+ struct vblank_control_work *vblank_work =
+ container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
mutex_lock(&dm->dc_lock);
@@ -1061,23 +1061,25 @@ static void event_mall_stutter(struct work_struct *work)
DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
- mutex_unlock(&dm->dc_lock);
-}
-
-static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
-{
- struct vblank_workqueue *vblank_work;
-
- vblank_work = kzalloc(sizeof(*vblank_work), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(vblank_work)) {
- kfree(vblank_work);
- return NULL;
+ /* Control PSR based on vblank requirements from OS */
+ if (vblank_work->stream && vblank_work->stream->link) {
+ if (vblank_work->enable) {
+ if (vblank_work->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(vblank_work->stream);
+ } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
+ !vblank_work->stream->link->psr_settings.psr_allow_active &&
+ vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+ amdgpu_dm_psr_enable(vblank_work->stream);
+ }
}
- INIT_WORK(&vblank_work->mall_work, event_mall_stutter);
+ mutex_unlock(&dm->dc_lock);
+
+ dc_stream_release(vblank_work->stream);
- return vblank_work;
+ kfree(vblank_work);
}
+
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
@@ -1198,7 +1200,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (adev->apu_flags) {
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
mmhub_read_system_context(adev, &pa_config);
@@ -1220,12 +1222,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->dm.dc->caps.max_links > 0) {
- adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
-
- if (!adev->dm.vblank_workqueue)
+ adev->dm.vblank_control_workqueue =
+ create_singlethread_workqueue("dm_vblank_control_workqueue");
+ if (!adev->dm.vblank_control_workqueue)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
- else
- DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
}
#endif
@@ -1298,6 +1298,13 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
int i;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->dm.vblank_control_workqueue) {
+ destroy_workqueue(adev->dm.vblank_control_workqueue);
+ adev->dm.vblank_control_workqueue = NULL;
+ }
+#endif
+
for (i = 0; i < adev->dm.display_indexes_num; i++) {
drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
}
@@ -1321,14 +1328,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
dc_deinit_callbacks(adev->dm.dc);
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (adev->dm.vblank_workqueue) {
- adev->dm.vblank_workqueue->dm = NULL;
- kfree(adev->dm.vblank_workqueue);
- adev->dm.vblank_workqueue = NULL;
- }
-#endif
-
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
if (dc_enable_dmub_notifications(adev->dm.dc)) {
@@ -6000,7 +5999,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct amdgpu_display_manager *dm = &adev->dm;
- unsigned long flags;
+ struct vblank_control_work *work;
#endif
int rc = 0;
@@ -6025,12 +6024,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- spin_lock_irqsave(&dm->vblank_lock, flags);
- dm->vblank_workqueue->dm = dm;
- dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
- dm->vblank_workqueue->enable = enable;
- spin_unlock_irqrestore(&dm->vblank_lock, flags);
- schedule_work(&dm->vblank_workqueue->mall_work);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ INIT_WORK(&work->work, vblank_control_worker);
+ work->dm = dm;
+ work->acrtc = acrtc;
+ work->enable = enable;
+
+ if (acrtc_state->stream) {
+ dc_stream_retain(acrtc_state->stream);
+ work->stream = acrtc_state->stream;
+ }
+
+ queue_work(dm->vblank_control_workqueue, &work->work);
#endif
return 0;
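
The rework above replaces a single shared, spinlock-protected work item with one heap-allocated item per event (GFP_ATOMIC, since this path can run with IRQs off), owned and freed by the worker. A userspace model of that ownership hand-off; the stubs replace the kernel workqueue API:

/* Per-event work item: allocator hands ownership to the worker. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct work { void (*fn)(struct work *); bool enable; };

static void vblank_control_worker(struct work *w)
{
        printf("vblank %s\n", w->enable ? "on" : "off");
        free(w);                     /* worker owns and frees the item */
}

static int dm_set_vblank(bool enable)
{
        struct work *w = calloc(1, sizeof(*w));

        if (!w)
                return -1;           /* -ENOMEM in the driver */
        w->fn = vblank_control_worker;
        w->enable = enable;
        w->fn(w);                    /* queue_work() in the driver */
        return 0;
}

int main(void) { return dm_set_vblank(true); }

This is why the old vblank_lock/spin_lock_irqsave dance disappears: no shared item means no lock, and events can no longer overwrite each other.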
@@ -8635,6 +8643,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /*
+ * If PSR or idle optimizations are enabled then flush out
+ * any pending work before hardware programming.
+ */
+ flush_workqueue(dm->vblank_control_workqueue);
+#endif
+
bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;
@@ -8703,16 +8719,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
- else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
- !acrtc_state->stream->link->psr_settings.psr_allow_active) {
- struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
- acrtc_state->stream->dm_stream_context;
+
+ /* Decrement skip count when PSR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
if (aconn->psr_skip_count > 0)
aconn->psr_skip_count--;
- else
- amdgpu_dm_psr_enable(acrtc_state->stream);
+
+ /* Allow PSR when skip count is 0. */
+ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+ } else {
+ acrtc_attach->dm_irq_params.allow_psr_entry = false;
}
mutex_unlock(&dm->dc_lock);
@@ -8961,8 +8981,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (dc_state) {
/* if there mode set or reset, disable eDP PSR */
- if (mode_set_reset_required)
+ if (mode_set_reset_required) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ flush_workqueue(dm->vblank_control_workqueue);
+#endif
amdgpu_dm_psr_disable_all(dm);
+ }
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index ab1670b16b02..d1d353a7c77d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -60,6 +60,7 @@ enum aux_return_code_type;
/* Forward declarations */
struct amdgpu_device;
+struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
@@ -86,16 +87,18 @@ struct dm_compressor_info {
};
/**
- * struct vblank_workqueue - Works to be executed in a separate thread during vblank
- * @mall_work: work for mall stutter
+ * struct vblank_control_work - Work data for vblank control
+ * @work: Kernel work data for the work event
* @dm: amdgpu display manager device
- * @otg_inst: otg instance of which vblank is being set
- * @enable: true if enable vblank
+ * @acrtc: amdgpu CRTC instance for which the event has occurred
+ * @stream: DC stream for which the event has occurred
+ * @enable: true if enabling vblank
*/
-struct vblank_workqueue {
- struct work_struct mall_work;
+struct vblank_control_work {
+ struct work_struct work;
struct amdgpu_display_manager *dm;
- int otg_inst;
+ struct amdgpu_crtc *acrtc;
+ struct dc_stream_state *stream;
bool enable;
};
@@ -380,11 +383,11 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
- * @vblank_workqueue:
+ * @vblank_control_workqueue:
*
- * amdgpu workqueue during vblank
+ * Deferred work for vblank control events.
*/
- struct vblank_workqueue *vblank_workqueue;
+ struct workqueue_struct *vblank_control_workqueue;
#endif
struct drm_atomic_state *cached_state;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 8e39e9245d06..c5f1dc3b5961 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -79,12 +79,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint
struct ta_hdcp_shared_memory *hdcp_cmd;
- if (!psp->hdcp_context.hdcp_initialized) {
+ if (!psp->hdcp_context.context.initialized) {
DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
return NULL;
}
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
@@ -105,12 +105,12 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,
struct ta_hdcp_shared_memory *hdcp_cmd;
- if (!psp->hdcp_context.hdcp_initialized) {
+ if (!psp->hdcp_context.context.initialized) {
DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
return -EINVAL;
}
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
@@ -414,12 +414,12 @@ static bool enable_assr(void *handle, struct dc_link *link)
struct ta_dtm_shared_memory *dtm_cmd;
bool res = true;
- if (!psp->dtm_context.dtm_initialized) {
+ if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
return false;
}
- dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 40f617bbb86f..4aba0e8c84f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
/*allocate a new amdgpu_dm_irq_handler_data*/
- handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+ handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
if (!handler_data_add) {
DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
return;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index f3b93ba69a27..79b5f9999fec 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -33,6 +33,7 @@ struct dm_irq_params {
struct mod_vrr_params vrr_params;
struct dc_stream_state *stream;
int active_planes;
+ bool allow_psr_entry;
struct mod_freesync_config freesync_config;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5568d4e518e6..1bcba6943fd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -213,6 +213,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
drm_connector_update_edid_property(
&aconnector->base,
NULL);
+
+ DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
+ if (!aconnector->dc_sink) {
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ NULL,
+ 0,
+ &init_params);
+
+ if (!dc_sink) {
+ DRM_ERROR("Unable to add a remote sink\n");
+ return 0;
+ }
+
+ dc_sink->priv = aconnector;
+ aconnector->dc_sink = dc_sink;
+ }
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 605e297b7a59..c798c65d4276 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1481,6 +1481,22 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
return true;
}
+static inline bool should_update_pipe_for_stream(
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ return (pipe_ctx->stream && pipe_ctx->stream == stream);
+}
+
+static inline bool should_update_pipe_for_plane(
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ return (pipe_ctx->plane_state == plane_state);
+}
+
void dc_enable_stereo(
struct dc *dc,
struct dc_state *context,
@@ -1491,12 +1507,15 @@ void dc_enable_stereo(
struct pipe_ctx *pipe;
for (i = 0; i < MAX_PIPES; i++) {
- if (context != NULL)
+ if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
- else
+ } else {
+ context = dc->current_state;
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- for (j = 0 ; pipe && j < stream_count; j++) {
- if (streams[j] && streams[j] == pipe->stream &&
+ }
+
+ for (j = 0; pipe && j < stream_count; j++) {
+ if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
dc->hwss.setup_stereo)
dc->hwss.setup_stereo(pipe, dc);
}
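
The two predicates introduced above centralize pipe-filtering conditions that commit_planes_for_stream() repeats many times below. A minimal model of the stream predicate and how the loops use it (types are illustrative):

/* Named predicate instead of open-coded "pipe->stream && pipe->stream == s". */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct stream { int id; };
struct pipe { struct stream *stream; };

static bool should_update_pipe_for_stream(struct pipe *p, struct stream *s)
{
        return p->stream && p->stream == s;   /* also filters unset pipes */
}

int main(void)
{
        struct stream s = { 1 };
        struct pipe pipes[] = { { &s }, { NULL } };

        for (size_t i = 0; i < 2; i++)
                if (should_update_pipe_for_stream(&pipes[i], &s))
                        printf("update pipe %zu\n", i);
        return 0;
}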
@@ -1530,6 +1549,12 @@ void dc_z10_restore(struct dc *dc)
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
}
+
+void dc_z10_save_init(struct dc *dc)
+{
+ if (dc->hwss.z10_save_init)
+ dc->hwss.z10_save_init(dc);
+}
#endif
/*
* Applies given context to HW and copy it into current context.
@@ -2623,6 +2648,7 @@ static void commit_planes_for_stream(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
+ bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
@@ -2694,7 +2720,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, true);
else
/* Lock the top pipe while updating plane addrs, since freesync requires
@@ -2717,7 +2743,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
@@ -2733,14 +2759,14 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!pipe_ctx->plane_state)
continue;
- if (pipe_ctx->plane_state != plane_state)
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
- plane_state->triplebuffer_flips = false;
+ pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
- !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
- plane_state->triplebuffer_flips = true;
+ pipe_ctx->plane_state->triplebuffer_flips = true;
}
}
if (update_type == UPDATE_TYPE_FULL) {
@@ -2756,8 +2782,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (!pipe_ctx->top_pipe &&
!pipe_ctx->prev_odm_pipe &&
- pipe_ctx->stream &&
- pipe_ctx->stream == stream) {
+ should_update_pipe_for_stream(context, pipe_ctx, stream)) {
struct dc_stream_status *stream_status = NULL;
if (!pipe_ctx->plane_state)
@@ -2810,15 +2835,15 @@ static void commit_planes_for_stream(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (pipe_ctx->stream != stream)
+ if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
continue;
- if (pipe_ctx->plane_state != plane_state)
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
// GSL has to be used for flip immediate
dc->hwss.set_flip_control_gsl(pipe_ctx,
- plane_state->flip_immediate);
+ pipe_ctx->plane_state->flip_immediate);
}
}
@@ -2829,25 +2854,26 @@ static void commit_planes_for_stream(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (pipe_ctx->stream != stream)
+ if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
continue;
- if (pipe_ctx->plane_state != plane_state)
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
+
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*only enable triplebuffer for fast_update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, plane_state->triplebuffer_flips);
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
- if (srf_updates[i].flip_addr)
+ if (pipe_ctx->plane_state->update_flags.bits.addr_update)
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
}
- if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
@@ -2891,7 +2917,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
- !pipe_ctx->stream || pipe_ctx->stream != stream ||
+ !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
!pipe_ctx->plane_state->update_flags.bits.addr_update ||
pipe_ctx->plane_state->skip_manual_trigger)
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cd025c12f17b..330edd666b7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1561,7 +1561,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct dc_link_settings *link_setting)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
dp_decide_training_settings(
link,
@@ -1707,7 +1707,7 @@ enum link_training_result dc_link_dp_perform_link_training(
bool skip_video_pattern)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum dp_link_encoding encoding =
dp_get_link_encoding_format(link_settings);
@@ -1923,7 +1923,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link_settings *link_settings,
struct dc_link_training_overrides *lt_overrides)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
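
All three hunks in this file add "= {0}" to the same on-stack struct. The reason, sketched below with a stand-in struct: link_training_settings is large, some fields may be left unwritten by the decide/override helpers, and reads of those fields then see indeterminate stack values. Aggregate zero-initialization gives every member a defined starting state.

/* Aggregate zero-init of an automatic struct: all members become zero. */
#include <stdio.h>

struct lt_settings { int voltage; int pre_emphasis; int pattern; };

int main(void)
{
        struct lt_settings lt = {0};   /* every member zeroed */
        printf("%d %d %d\n", lt.voltage, lt.pre_emphasis, lt.pattern);
        return 0;
}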
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 327fd1909c51..f0f54f4d3d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -246,6 +246,40 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
+static void program_cursor_attributes(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ int i;
+ struct resource_context *res_ctx;
+ struct pipe_ctx *pipe_to_program = NULL;
+
+ if (!stream)
+ return;
+
+ res_ctx = &dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ if (!pipe_to_program) {
+ pipe_to_program = pipe_ctx;
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ }
+
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
+
+ if (pipe_to_program)
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+}
+
#ifndef TRIM_FSFT
/*
* dc_optimize_timing_for_fsft() - dc to optimize timing
@@ -270,10 +304,7 @@ bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
{
- int i;
struct dc *dc;
- struct resource_context *res_ctx;
- struct pipe_ctx *pipe_to_program = NULL;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool reset_idle_optimizations = false;
#endif
@@ -293,7 +324,6 @@ bool dc_stream_set_cursor_attributes(
}
dc = stream->ctx->dc;
- res_ctx = &dc->current_state->res_ctx;
stream->cursor_attributes = *attributes;
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -305,11 +335,39 @@ bool dc_stream_set_cursor_attributes(
}
#endif
+ program_cursor_attributes(dc, stream, attributes);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* re-enable idle optimizations if necessary */
+ if (reset_idle_optimizations)
+ dc_allow_idle_optimizations(dc, true);
+
+#endif
+ return true;
+}
+
+static void program_cursor_position(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position)
+{
+ int i;
+ struct resource_context *res_ctx;
+ struct pipe_ctx *pipe_to_program = NULL;
+
+ if (!stream)
+ return;
+
+ res_ctx = &dc->current_state->res_ctx;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- if (pipe_ctx->stream != stream)
+ if (pipe_ctx->stream != stream ||
+ (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
+ !pipe_ctx->plane_state ||
+ (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
+ (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))
continue;
if (!pipe_to_program) {
@@ -317,31 +375,18 @@ bool dc_stream_set_cursor_attributes(
dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
- dc->hwss.set_cursor_attribute(pipe_ctx);
- if (dc->hwss.set_cursor_sdr_white_level)
- dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ dc->hwss.set_cursor_position(pipe_ctx);
}
if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations)
- dc_allow_idle_optimizations(dc, true);
-
-#endif
- return true;
}
bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position)
{
- int i;
struct dc *dc;
- struct resource_context *res_ctx;
- struct pipe_ctx *pipe_to_program = NULL;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool reset_idle_optimizations = false;
#endif
@@ -357,7 +402,6 @@ bool dc_stream_set_cursor_position(
}
dc = stream->ctx->dc;
- res_ctx = &dc->current_state->res_ctx;
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
@@ -370,27 +414,7 @@ bool dc_stream_set_cursor_position(
#endif
stream->cursor_position = *position;
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
- if (pipe_ctx->stream != stream ||
- (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
- !pipe_ctx->plane_state ||
- (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
- (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))
- continue;
-
- if (!pipe_to_program) {
- pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
- }
-
- dc->hwss.set_cursor_position(pipe_ctx);
- }
-
- if (pipe_to_program)
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
-
+ program_cursor_position(dc, stream, position);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)
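
The program_cursor_attributes()/program_cursor_position() factoring above preserves a lock-once pattern: take the cursor lock on the first matching pipe, program every match, unlock once at the end. A stubbed model of that shape:

/* Lock before the first write, single unlock, and only if we locked. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pipe { int stream_id; };

static void cursor_lock(bool lock)   { printf("lock=%d\n", lock); }
static void program(struct pipe *p)  { printf("program pipe %d\n", p->stream_id); }

static void program_all(struct pipe *pipes, size_t n, int stream_id)
{
        struct pipe *first = NULL;

        for (size_t i = 0; i < n; i++) {
                if (pipes[i].stream_id != stream_id)
                        continue;
                if (!first) {
                        first = &pipes[i];
                        cursor_lock(true);   /* lock before first write */
                }
                program(&pipes[i]);
        }
        if (first)
                cursor_lock(false);          /* never unlock what we didn't lock */
}

int main(void)
{
        struct pipe pipes[] = { {1}, {2}, {1} };
        program_all(pipes, 3, 1);
        return 0;
}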
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index f2b39ec35c89..cde8ed2560b3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_z10_save_init(dc);
+#endif
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 62c222d0402f..3ab52d9a82cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;
-#define DC_VER "3.2.147"
+#define DC_VER "3.2.149"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
#endif
bool dc_enable_dmub_notifications(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 058a9356a39a..e14f99b4b0c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -42,6 +42,11 @@
#define DC_LOGGER \
engine->ctx->logger
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
+#define LOG_FLAG_Error_I2cAux LOG_ERROR
+#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
+
#include "reg_helper.h"
#undef FN
@@ -623,6 +628,58 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,
#define AUX_MAX_INVALID_REPLY_RETRIES 2
#define AUX_MAX_TIMEOUT_RETRIES 3
+static void dce_aux_log_payload(const char *payload_name,
+ unsigned char *payload, uint32_t length, uint32_t max_length_to_log)
+{
+ if (!IS_DC_I2CAUX_LOGGING_ENABLED())
+ return;
+
+ if (payload && length) {
+ char hex_str[128] = {0};
+ char *hex_str_ptr = &hex_str[0];
+ uint32_t hex_str_remaining = sizeof(hex_str);
+ unsigned char *payload_ptr = payload;
+ unsigned char *payload_max_to_log_ptr = payload_ptr + min(max_length_to_log, length);
+ unsigned int count;
+ char *padding = "";
+
+ while (payload_ptr < payload_max_to_log_ptr) {
+ count = snprintf_count(hex_str_ptr, hex_str_remaining, "%s%02X", padding, *payload_ptr);
+ padding = " ";
+ hex_str_remaining -= count;
+ hex_str_ptr += count;
+ payload_ptr++;
+ }
+
+ count = snprintf_count(hex_str_ptr, hex_str_remaining, " ");
+ hex_str_remaining -= count;
+ hex_str_ptr += count;
+
+ payload_ptr = payload;
+ while (payload_ptr < payload_max_to_log_ptr) {
+ count = snprintf_count(hex_str_ptr, hex_str_remaining, "%c",
+ *payload_ptr >= ' ' ? *payload_ptr : '.');
+ hex_str_remaining -= count;
+ hex_str_ptr += count;
+ payload_ptr++;
+ }
+
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_log_payload: %s: length=%u: data: %s%s",
+ payload_name,
+ length,
+ hex_str,
+ (length > max_length_to_log ? " (...)" : " "));
+ } else {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_log_payload: %s: length=%u: data: <empty payload>",
+ payload_name,
+ length);
+ }
+}
+
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
{
@@ -648,7 +705,34 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
}
for (i = 0; i < AUX_MAX_RETRIES; i++) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
+ ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
+ i + 1,
+ (int)AUX_MAX_RETRIES,
+ payload->address,
+ payload->length,
+ (unsigned int) payload->write,
+ (unsigned int) payload->mot);
+ if (payload->write)
+ dce_aux_log_payload(" write", payload->data, payload->length, 16);
ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
+ ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
+ i + 1,
+ (int)AUX_MAX_RETRIES,
+ payload->address,
+ payload->length,
+ (unsigned int) payload->write,
+ (unsigned int) payload->mot,
+ ret,
+ (int)operation_result,
+ (unsigned int) *payload->reply);
+ if (!payload->write)
+ dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16);
switch (operation_result) {
case AUX_RET_SUCCESS:
@@ -657,30 +741,64 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
switch (*payload->reply) {
case AUX_TRANSACTION_REPLY_AUX_ACK:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_ACK");
if (!payload->write && payload->length != ret) {
- if (++aux_ack_retries >= AUX_MAX_RETRIES)
+ if (++aux_ack_retries >= AUX_MAX_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_ack_retries=%d >= AUX_MAX_RETRIES=%d",
+ aux_ack_retries,
+ AUX_MAX_RETRIES);
goto fail;
- else
+ } else {
udelay(300);
+ }
} else
return true;
break;
case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER");
+
/* polling_timeout_period is in us */
defer_time_in_ms += aux110->polling_timeout_period / 1000;
++aux_defer_retries;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+ if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER");
+
retry_on_defer = true;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK)
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
+
if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
&& defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d && defer_time_in_ms=%d >= AUX_MAX_DEFER_TIMEOUT_MS=%d",
+ aux_defer_retries,
+ AUX_MIN_DEFER_RETRIES,
+ defer_time_in_ms,
+ AUX_MAX_DEFER_TIMEOUT_MS);
goto fail;
} else {
if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
(*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: payload->defer_delay=%u",
+ payload->defer_delay);
if (payload->defer_delay > 1) {
msleep(payload->defer_delay);
defer_time_in_ms += payload->defer_delay;
@@ -693,37 +811,86 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_DEFER");
+
aux_defer_retries = 0;
- if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
+ if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_i2c_defer_retries=%d >= AUX_MAX_I2C_DEFER_RETRIES=%d",
+ aux_i2c_defer_retries,
+ AUX_MAX_I2C_DEFER_RETRIES);
goto fail;
+ }
break;
case AUX_TRANSACTION_REPLY_AUX_NACK:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_NACK");
+ goto fail;
+
case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_HPD_DISCON");
+ goto fail;
+
default:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case.");
goto fail;
}
break;
case AUX_RET_ERROR_INVALID_REPLY:
- if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_ERROR_INVALID_REPLY");
+ if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_invalid_reply_retries=%d >= AUX_MAX_INVALID_REPLY_RETRIES=%d",
+ aux_invalid_reply_retries,
+ AUX_MAX_INVALID_REPLY_RETRIES);
goto fail;
- else
+ } else
udelay(400);
break;
case AUX_RET_ERROR_TIMEOUT:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: AUX_RET_ERROR_TIMEOUT");
// Check whether a DEFER had occurred before the timeout.
// If so, treat timeout as a DEFER.
if (retry_on_defer) {
- if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES)
+ if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d",
+ aux_defer_retries,
+ AUX_MIN_DEFER_RETRIES);
goto fail;
- else if (payload->defer_delay > 0)
+ } else if (payload->defer_delay > 0) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: payload->defer_delay=%u",
+ payload->defer_delay);
msleep(payload->defer_delay);
+ }
} else {
- if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+ if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_timeout_retries=%d >= AUX_MAX_TIMEOUT_RETRIES=%d",
+ aux_timeout_retries,
+ AUX_MAX_TIMEOUT_RETRIES);
goto fail;
- else {
+ } else {
/*
* DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
* According to the DP spec there should be 3 retries total
@@ -738,11 +905,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_RET_ERROR_ENGINE_ACQUIRE:
case AUX_RET_ERROR_UNKNOWN:
default:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: Failure: operation_result=%d",
+ (int)operation_result);
goto fail;
}
}
fail:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE");
if (!payload_reply)
payload->reply = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 1ca8b1d94bc2..aa8403bc4c83 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -29,7 +29,7 @@
#include "dmub/dmub_srv.h"
#include "core_types.h"
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do {} while (0) /* do nothing */
#define MAX_PIPES 6
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 35af0401f256..df8a7718a85f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3641,13 +3641,12 @@ enum dc_status dcn10_set_clock(struct dc *dc,
struct dc_clock_config clock_cfg = {0};
struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
- if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
- dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
- context, clock_type, &clock_cfg);
-
- if (!dc->clk_mgr->funcs->get_clock)
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
return DC_FAIL_UNSUPPORTED_1;
+ dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
+ context, clock_type, &clock_cfg);
+
if (clk_khz > clock_cfg.max_clock_khz)
return DC_FAIL_CLK_EXCEED_MAX;
@@ -3665,7 +3664,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,
else
return DC_ERROR_UNEXPECTED;
- if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
+ if (dc->clk_mgr->funcs->update_clocks)
dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
context, true);
return DC_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index f1a08a7736ac..cf364ae93138 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -522,16 +522,21 @@ void enc1_stream_encoder_hdmi_set_stream_attribute(
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_888:
REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+ DC_LOG_DEBUG("HDMI source set to 24BPP deep color depth\n");
break;
case COLOR_DEPTH_101010:
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 0);
+ DC_LOG_DEBUG("HDMI source 30BPP deep color depth" \
+ "disabled for YCBCR422 pixel encoding\n");
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 1);
+ DC_LOG_DEBUG("HDMI source 30BPP deep color depth" \
+ "enabled for YCBCR422 non-pixel encoding\n");
}
break;
case COLOR_DEPTH_121212:
@@ -539,16 +544,22 @@ void enc1_stream_encoder_hdmi_set_stream_attribute(
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 0);
+ DC_LOG_DEBUG("HDMI source 36BPP deep color depth" \
+ "disabled for YCBCR422 pixel encoding\n");
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 1);
+ DC_LOG_DEBUG("HDMI source 36BPP deep color depth" \
+ "enabled for non-pixel YCBCR422 encoding\n");
}
break;
case COLOR_DEPTH_161616:
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 3,
HDMI_DEEP_COLOR_ENABLE, 1);
+ DC_LOG_DEBUG("HDMI source deep color depth enabled in" \
+ "reserved mode\n");
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 5c2853654cca..a47ba1d45be9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1723,13 +1723,15 @@ void dcn20_program_front_end_for_ctx(
pipe = pipe->bottom_pipe;
}
- /* Program secondary blending tree and writeback pipes */
- pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
- && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
- && hws->funcs.program_all_writeback_pipes_in_tree)
- hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
index 3fe9e41e4dbd..6a3d3a0ec0a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -49,6 +49,11 @@
static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
struct dcn3_xfer_func_reg *reg)
{
+ reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
+ reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
+ reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
+ reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
+
reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
@@ -66,8 +71,6 @@ static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
- reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
- reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
@@ -147,18 +150,19 @@ static enum dc_lut_mode dwb3_get_ogam_current(
uint32_t state_mode;
uint32_t ram_select;
- REG_GET(DWB_OGAM_CONTROL,
- DWB_OGAM_MODE, &state_mode);
- REG_GET(DWB_OGAM_CONTROL,
- DWB_OGAM_SELECT, &ram_select);
+ REG_GET_2(DWB_OGAM_CONTROL,
+ DWB_OGAM_MODE_CURRENT, &state_mode,
+ DWB_OGAM_SELECT_CURRENT, &ram_select);
if (state_mode == 0) {
mode = LUT_BYPASS;
} else if (state_mode == 2) {
if (ram_select == 0)
mode = LUT_RAM_A;
- else
+ else if (ram_select == 1)
mode = LUT_RAM_B;
+ else
+ mode = LUT_BYPASS;
} else {
// Reserved value
mode = LUT_BYPASS;
@@ -172,10 +176,10 @@ static void dwb3_configure_ogam_lut(
struct dcn30_dwbc *dwbc30,
bool is_ram_a)
{
- REG_UPDATE(DWB_OGAM_LUT_CONTROL,
- DWB_OGAM_LUT_READ_COLOR_SEL, 7);
- REG_UPDATE(DWB_OGAM_CONTROL,
- DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1);
+ REG_UPDATE_2(DWB_OGAM_LUT_CONTROL,
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 7,
+ DWB_OGAM_LUT_HOST_SEL, is_ram_a ? 0 : 1);
+
REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
}
@@ -185,17 +189,45 @@ static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
{
uint32_t i;
- // triple base implementation
- for (i = 0; i < num/2; i++) {
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
+ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
+ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
+
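+ /* The write-color mask gates which channels latch DWB_OGAM_LUT_DATA:
+ * 7 (set in dwb3_configure_ogam_lut) writes R, G and B together, while
+ * the per-channel passes below use masks 4, 2 and 1 respectively.
+ */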
+ if (is_rgb_equal(rgb, num)) {
+ for (i = 0; i < num; i++)
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
+
+ } else {
+
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 4);
+
+ for (i = 0; i < num; i++)
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
+
+ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
+
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 2);
+
+ for (i = 0; i < num; i++)
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg);
+
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green);
+
+ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
+
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 1);
+
+ for (i = 0; i < num; i++)
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);
}
}
@@ -211,6 +243,8 @@ static bool dwb3_program_ogam_lut(
return false;
}
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
+
current_mode = dwb3_get_ogam_current(dwbc30);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
@@ -227,8 +261,7 @@ static bool dwb3_program_ogam_lut(
dwb3_program_ogam_pwl(
dwbc30, params->rgb_resulted, params->hw_points_num);
- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
+ REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
return true;
}
@@ -271,14 +304,19 @@ static void dwb3_program_gamut_remap(
struct color_matrices_reg gam_regs;
- REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
-
if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
REG_SET(DWB_GAMUT_REMAP_MODE, 0,
DWB_GAMUT_REMAP_MODE, 0);
return;
}
+ REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
+
+ gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11;
+ gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11;
+ gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12;
+ gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12;
+
switch (select) {
case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 2e8ab9775fa3..fafed1e4a998 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -398,12 +398,22 @@ void dcn30_program_all_writeback_pipes_in_tree(
for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+ if (!pipe_ctx->plane_state)
+ continue;
+
if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
break;
}
}
- ASSERT(wb_info.mpcc_inst != -1);
+
+ if (wb_info.mpcc_inst == -1) {
+ /* Disable writeback pipe and disconnect from MPCC
+ * if source plane has been removed
+ */
+ dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
+ continue;
+ }
ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 253654d605c2..a0de309475a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -92,7 +92,7 @@
#define DC_LOGGER_INIT(logger)
struct _vcs_dpi_ip_params_st dcn3_0_ip = {
- .use_min_dcfclk = 1,
+ .use_min_dcfclk = 0,
.clamp_min_dcfclk = 0,
.odm_capable = 1,
.gpuvm_enable = 0,
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
}
pri_pipe->next_odm_pipe = sec_pipe;
sec_pipe->prev_odm_pipe = pri_pipe;
- ASSERT(sec_pipe->top_pipe == NULL);
if (!sec_pipe->top_pipe)
sec_pipe->stream_res.opp = pool->opps[pipe_idx];
@@ -2399,16 +2398,37 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
if (bw_params->clk_table.entries[0].memclk_mhz) {
+ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ }
+
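+ /* If the SMU clock table reported no levels for a clock, fall back
+ * to the default SoC bounding-box limit for it.
+ */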
+ if (!max_dcfclk_mhz)
+ max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
+ if (!max_dispclk_mhz)
+ max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
+ if (!max_dppclk_mhz)
+ max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
+ if (!max_phyclk_mhz)
+ max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
- if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
- dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
- } else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
- if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
- dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
+ if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
@@ -2448,7 +2468,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
@@ -2463,11 +2483,12 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
- optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
+ dcn3_0_soc.num_states = num_states;
for (i = 0; i < dcn3_0_soc.num_states; i++) {
dcn3_0_soc.clock_limits[i].state = i;
dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
@@ -2475,9 +2496,9 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* Fill all states with max values of all other clocks */
- dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
- dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
- dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
+ dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
+ dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
+ dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
@@ -2490,11 +2511,6 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
}
-
- /* re-init DML with updated bb */
- dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
- if (dc->current_state)
- dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
}
static const struct resource_funcs dcn30_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 9776d1737818..912285fdce18 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
-static void calculate_wm_set_for_vlevel(
- int vlevel,
- struct wm_range_table_entry *table_entry,
- struct dcn_watermarks *wm_set,
- struct display_mode_lib *dml,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt)
-{
- double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
- ASSERT(vlevel < dml->soc.num_states);
- /* only pipe 0 is read for voltage and dcf/soc clocks */
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
- dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
- dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
- dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
- wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
- wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
- dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel_req)
-{
- int i, pipe_idx;
- int vlevel, vlevel_max;
- struct wm_range_table_entry *table_entry;
- struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
- ASSERT(bw_params);
-
- vlevel_max = bw_params->clk_table.num_entries - 1;
-
- /* WM Set D */
- table_entry = &bw_params->wm_table.entries[WM_D];
- if (table_entry->wm_type == WM_TYPE_RETRAINING)
- vlevel = 0;
- else
- vlevel = vlevel_max;
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set C */
- table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set B */
- table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- /* WM Set A */
- table_entry = &bw_params->wm_table.entries[WM_A];
- vlevel = min(vlevel_req, vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
- .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+ .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index dc7823d23ba8..dd38796ba30a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -510,8 +510,12 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
vpg = dcn303_vpg_create(ctx, vpg_inst);
afmt = dcn303_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
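+ /* kfree(NULL) is a no-op, so free whichever allocations succeeded. */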
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 83f7904630e6..3f2333ec67e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -407,6 +407,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
&pipe_ctx->stream_res.encoder_info_frame);
}
}
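+/* Send the DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT command and wait for DMCUB
+ * to finish, so firmware captures DCN state that dcn31_z10_restore()
+ * can later restore.
+ */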
+void dcn31_z10_save_init(struct dc *dc)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+ dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
void dcn31_z10_restore(struct dc *dc)
{
@@ -598,20 +610,3 @@ bool dcn31_is_abm_supported(struct dc *dc,
}
return false;
}
-
-static void apply_riommu_invalidation_wa(struct dc *dc)
-{
- struct dce_hwseq *hws = dc->hwseq;
-
- if (!hws->wa.early_riommu_invalidation)
- return;
-
- REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
-}
-
-void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
-{
- dcn10_init_pipes(dc, context);
- apply_riommu_invalidation_wa(dc);
-
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index 40dfebe78fdd..140435e4f7ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index aaf2dbd095fe..40011cd3c8ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -97,13 +97,14 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
+ .z10_save_init = dcn31_z10_save_init,
.is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
- .init_pipes = dcn31_init_pipes,
+ .init_pipes = dcn10_init_pipes,
.update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 7db268da6976..3b3721386571 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -109,7 +109,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_backlight_on;
}
@@ -119,7 +119,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_powered_on;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index cd3248dc31d8..a7702d3c75cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1302,7 +1302,6 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
- hws->wa.early_riommu_invalidation = true;
}
return hws;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index fbed5304692d..63bbdf8b8678 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2641,7 +2641,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index e1a961a62add..e3d9f1decdfc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3644,8 +3644,7 @@ static double TruncToValidBPP(
void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
- int MinPrefetchMode = 0;
- int MaxPrefetchMode = 2;
+ int MinPrefetchMode, MaxPrefetchMode;
int i;
unsigned int j, k, m;
bool EnoughWritebackUnits = true;
@@ -3657,6 +3656,10 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
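+ /* Derive the prefetch-mode range from the SoC's policy on allowing
+ * DRAM self-refresh / mclk switch in vblank, rather than the
+ * previously hardcoded 0..2.
+ */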
+ CalculateMinAndMaxPrefetchMode(
+ mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
+ &MinPrefetchMode, &MaxPrefetchMode);
+
/*Scale Ratio, taps Support Check*/
v->ScaleRatioAndTapsSupport = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 73f5be26abc4..0fad15020c74 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -244,6 +244,8 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
mode_lib->vba.DummyPStateCheck;
mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
+ mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank =
+ soc->allow_dram_self_refresh_or_dram_clock_change_in_vblank;
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
@@ -733,8 +735,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.OverrideHostVMPageTableLevels;
}
- mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank = dm_try_to_allow_self_refresh_and_mclk_switch;
-
if (mode_lib->vba.OverrideGPUVMPageTableLevels)
mode_lib->vba.GPUVMMaxPageTableLevels = mode_lib->vba.OverrideGPUVMPageTableLevels;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 5ab008e62b82..ad5f2adcc40d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
int width, int height, int offset);
void (*z10_restore)(struct dc *dc);
+ void (*z10_save_init)(struct dc *dc);
void (*update_visual_confirm_color)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 082549f75978..f7f7e4fff0c2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -41,7 +41,6 @@ struct dce_hwseq_wa {
bool DEGVIDCN10_254;
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
- bool early_riommu_invalidation;
};
struct hwseq_wa_state {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index aa2707e469c1..7b684e7f60df 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x6d13d5e2c
+#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 77
+#define DMUB_FW_VERSION_REVISION 79
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -322,6 +322,10 @@ union dmub_fw_boot_status {
uint32_t mailbox_rdy : 1; /**< 1 if mailbox ready */
uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
uint32_t restore_required : 1; /**< 1 if driver should call restore */
+ uint32_t defer_load : 1; /**< 1 if VBIOS data programming is deferred */
+ uint32_t reserved : 1;
+ uint32_t detection_required : 1; /**< 1 if detection needs to be triggered by the driver */
+
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -335,6 +339,7 @@ enum dmub_fw_boot_status_bit {
DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */
DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */
DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */
+ DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection needs to be triggered by the driver */
};
/* Register bit definition for SCRATCH5 */
@@ -489,6 +494,11 @@ enum dmub_gpint_command {
* RETURN: PSR residency in milli-percent.
*/
DMUB_GPINT__PSR_RESIDENCY = 9,
+
+ /**
+ * DESC: Notifies DMCUB that detection is done, so the detection-required flag can be cleared.
+ */
+ DMUB_GPINT__NOTIFY_DETECTION_DONE = 12,
};
/**
@@ -860,6 +870,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware restore.
*/
DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+ /**
+ * DCN hardware save.
+ */
+ DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
};
/**
@@ -1438,7 +1453,7 @@ struct dmub_cmd_psr_set_level_data {
* 16-bit value dicated by driver that will enable/disable different functionality.
*/
uint16_t psr_level;
- /**
+ /**
* PSR control version.
*/
uint8_t cmd_version;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 6820012e3b6e..fc667cb17eb0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -83,7 +83,7 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
+ const uint32_t timeout = 100;
uint32_t in_reset, scratch, i;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -98,26 +98,22 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
/**
* Timeout covers both the ACK and the wait
* for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
*/
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
+
+ udelay(1);
}
for (i = 0; i < timeout; ++i) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
- }
- /* Clear the GPINT command manually so we don't reset again. */
- cmd.all = 0;
- dmub->hw_funcs.set_gpint(dmub, cmd);
+ udelay(1);
+ }
/* Force reset in case we timed out, DMCUB is likely hung. */
}
@@ -130,6 +126,10 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
+
+ /* Clear the GPINT command manually so we don't send anything during boot. */
+ cmd.all = 0;
+ dmub->hw_funcs.set_gpint(dmub, cmd);
}
void dmub_dcn31_reset_release(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 06d60f031a06..3e81850a7ffe 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -145,6 +145,7 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ set_auth_complete(hdcp, output);
}
else if (is_hdmi_dvi_sl_hdcp(hdcp))
if (is_cp_desired_hdcp2(hdcp)) {
@@ -156,10 +157,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ set_auth_complete(hdcp, output);
}
else {
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ set_auth_complete(hdcp, output);
}
} else if (is_in_cp_not_desired_state(hdcp)) {
increment_stay_counter(hdcp);
@@ -520,7 +523,7 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
/* reset authentication if needed */
if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
- HDCP_FULL_DDC_TRACE(hdcp);
+ mod_hdcp_log_ddc_trace(hdcp);
reset_status = reset_authentication(hdcp, output);
if (reset_status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, reset_status);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 7123f0915706..399fbca8947b 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -324,6 +324,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
/* log functions */
void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
uint8_t *buf, uint32_t buf_size);
+void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp);
/* TODO: add adjustment log */
/* psp functions */
@@ -494,6 +495,13 @@ static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
output->watchdog_timer_delay = time;
}
+static inline void set_auth_complete(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ output->auth_complete = 1;
+ mod_hdcp_log_ddc_trace(hdcp);
+}
+
/* connection topology helpers */
static inline uint8_t is_display_active(struct mod_hdcp_display *display)
{
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 3dda8c1d83fc..7f011196ce98 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -89,7 +89,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
- HDCP_FULL_DDC_TRACE(hdcp);
+ set_auth_complete(hdcp, output);
}
break;
case H1_A45_AUTHENTICATED:
@@ -137,7 +137,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
- HDCP_FULL_DDC_TRACE(hdcp);
+ set_auth_complete(hdcp, output);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
@@ -239,7 +239,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
} else {
set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
- HDCP_FULL_DDC_TRACE(hdcp);
+ set_auth_complete(hdcp, output);
}
break;
case D1_A4_AUTHENTICATED:
@@ -311,7 +311,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
break;
}
set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
- HDCP_FULL_DDC_TRACE(hdcp);
+ set_auth_complete(hdcp, output);
break;
default:
fail_and_restart_in_ms(0, &status, output);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 70cb230d8f56..1f4095b26409 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -242,7 +242,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A5_AUTHENTICATED);
- HDCP_FULL_DDC_TRACE(hdcp);
+ set_auth_complete(hdcp, output);
break;
case H2_A5_AUTHENTICATED:
if (input->rxstatus_read == FAIL ||
@@ -559,7 +559,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
break;
}
set_state_id(hdcp, output, D2_A5_AUTHENTICATED);
- HDCP_FULL_DDC_TRACE(hdcp);
+ set_auth_complete(hdcp, output);
break;
case D2_A5_AUTHENTICATED:
if (input->rxstatus_read == FAIL ||
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 1a0f7c3dc964..6b3b5f610907 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -51,6 +51,80 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
}
}
+void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp)
+{
+ if (is_hdcp1(hdcp)) {
+ HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv,
+ sizeof(hdcp->auth.msg.hdcp1.bksv));
+ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps,
+ sizeof(hdcp->auth.msg.hdcp1.bcaps));
+ HDCP_DDC_READ_TRACE(hdcp, "BSTATUS",
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ sizeof(hdcp->auth.msg.hdcp1.bstatus));
+ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo,
+ sizeof(hdcp->auth.msg.hdcp1.ainfo));
+ HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'",
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
+ sizeof(hdcp->auth.msg.hdcp1.r0p));
+ HDCP_DDC_READ_TRACE(hdcp, "BINFO",
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
+ HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+ HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp,
+ sizeof(hdcp->auth.msg.hdcp1.vp));
+ } else if (is_hdcp2(hdcp)) {
+ HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version",
+ &hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
+ sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp,
+ sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init,
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
+ HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert,
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+ HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM",
+ hdcp->auth.msg.hdcp2.ake_stored_km,
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+ HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM",
+ hdcp->auth.msg.hdcp2.ake_no_stored_km,
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+ HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime,
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+ HDCP_DDC_READ_TRACE(hdcp, "Pairing Info",
+ hdcp->auth.msg.hdcp2.ake_pairing_info,
+ sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+ HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init,
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
+ HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime,
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+ HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks,
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Status",
+ (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
+ sizeof(hdcp->auth.msg.hdcp2.rxstatus));
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Id List",
+ hdcp->auth.msg.hdcp2.rx_id_list,
+ hdcp->auth.msg.hdcp2.rx_id_list_size);
+ HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack",
+ hdcp->auth.msg.hdcp2.repeater_auth_ack,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+ HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management",
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+ hdcp->auth.msg.hdcp2.stream_manage_size);
+ HDCP_DDC_READ_TRACE(hdcp, "Stream Ready",
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+ HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type",
+ hdcp->auth.msg.hdcp2.content_stream_type_dp,
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ }
+}
+
char *mod_hdcp_status_to_str(int32_t status)
{
switch (status) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 47f8ee2832ff..eb6f9b9c504a 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -106,78 +106,6 @@
hdcp->config.index, msg_name,\
hdcp->buf); \
} while (0)
-#define HDCP_FULL_DDC_TRACE(hdcp) do { \
- if (is_hdcp1(hdcp)) { \
- HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
- sizeof(hdcp->auth.msg.hdcp1.bksv)); \
- HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
- sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
- HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
- (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
- sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
- sizeof(hdcp->auth.msg.hdcp1.an)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
- sizeof(hdcp->auth.msg.hdcp1.aksv)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
- sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
- HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
- (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
- sizeof(hdcp->auth.msg.hdcp1.r0p)); \
- HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
- (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
- sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
- HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
- hdcp->auth.msg.hdcp1.ksvlist_size); \
- HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
- sizeof(hdcp->auth.msg.hdcp1.vp)); \
- } else { \
- HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \
- &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \
- sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \
- HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \
- sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \
- sizeof(hdcp->auth.msg.hdcp2.ake_init)); \
- HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \
- sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \
- hdcp->auth.msg.hdcp2.ake_stored_km, \
- sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \
- hdcp->auth.msg.hdcp2.ake_no_stored_km, \
- sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \
- HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \
- sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \
- HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \
- hdcp->auth.msg.hdcp2.ake_pairing_info, \
- sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \
- sizeof(hdcp->auth.msg.hdcp2.lc_init)); \
- HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \
- sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \
- sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \
- HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \
- (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \
- sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \
- HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \
- hdcp->auth.msg.hdcp2.rx_id_list, \
- hdcp->auth.msg.hdcp2.rx_id_list_size); \
- HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \
- hdcp->auth.msg.hdcp2.repeater_auth_ack, \
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \
- hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \
- hdcp->auth.msg.hdcp2.stream_manage_size); \
- HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \
- hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \
- hdcp->auth.msg.hdcp2.content_stream_type_dp, \
- sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \
- } \
-} while (0)
#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
hdcp->config.index, i)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index ade86a042398..e9bd84ec027d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -54,7 +54,7 @@ static enum mod_hdcp_status remove_display_from_topology_v2(
get_active_display_at_index(hdcp, index);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
@@ -90,7 +90,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
get_active_display_at_index(hdcp, index);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
@@ -128,13 +128,13 @@ static enum mod_hdcp_status add_display_to_topology_v2(
struct mod_hdcp_link *link = &hdcp->connection.link;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (!psp->dtm_context.dtm_initialized) {
+ if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
- dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -175,13 +175,13 @@ static enum mod_hdcp_status add_display_to_topology_v3(
struct mod_hdcp_link *link = &hdcp->connection.link;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (!psp->dtm_context.dtm_initialized) {
+ if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
- dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -253,12 +253,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (!psp->hdcp_context.hdcp_initialized) {
+ if (!psp->hdcp_context.context.initialized) {
DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
return MOD_HDCP_STATUS_FAILURE;
}
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
mutex_lock(&psp->hdcp_context.mutex);
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -293,7 +293,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
@@ -325,7 +325,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
@@ -367,7 +367,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
@@ -393,7 +393,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
@@ -436,7 +436,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
@@ -471,7 +471,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -498,7 +498,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (!psp->hdcp_context.hdcp_initialized) {
+ if (!psp->hdcp_context.context.initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
return MOD_HDCP_STATUS_FAILURE;
}
@@ -508,7 +508,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
@@ -545,7 +545,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id;
@@ -579,7 +579,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -611,7 +611,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -671,7 +671,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -717,7 +717,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -750,7 +750,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -785,7 +785,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -833,7 +833,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
@@ -862,7 +862,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -914,7 +914,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -958,7 +958,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -994,7 +994,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index a6eb86de8d5c..f37101f5a777 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -225,6 +225,7 @@ struct mod_hdcp_output {
uint8_t watchdog_timer_stop;
uint16_t callback_delay;
uint16_t watchdog_timer_delay;
+ uint8_t auth_complete;
};
/* used to represent per display info */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
index a485526f3a51..8474f419caa5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
@@ -38,6 +38,9 @@
#define mmCG_TACH_CTRL 0x006a
#define mmCG_TACH_CTRL_BASE_IDX 0
+#define mmCG_TACH_STATUS 0x006b
+#define mmCG_TACH_STATUS_BASE_IDX 0
+
#define mmTHM_THERMAL_INT_ENA 0x000a
#define mmTHM_THERMAL_INT_ENA_BASE_IDX 0
#define mmTHM_THERMAL_INT_CTRL 0x000b
@@ -49,4 +52,7 @@
#define mmTHM_BACO_CNTL 0x0081
#define mmTHM_BACO_CNTL_BASE_IDX 0
+#define mmCG_THERMAL_STATUS 0x006C
+#define mmCG_THERMAL_STATUS_BASE_IDX 0
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
index d130d92aee19..f2f9eae9a68f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
@@ -92,5 +92,8 @@
#define THM_TCON_THERM_TRIP__RSVD3_MASK 0x7FFFC000L
#define THM_TCON_THERM_TRIP__SW_THERM_TP_MASK 0x80000000L
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L
+
#endif
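
The new CG_THERMAL_STATUS field exposes the fan controller's current duty cycle as an 8-bit value. A minimal sketch of how a caller could read it back, assuming the usual SOC15 register helpers (RREG32_SOC15/REG_GET_FIELD) are available in the consuming file; the function name is hypothetical:

    /* Sketch: read the current fan duty cycle (0 - 255) from CG_THERMAL_STATUS. */
    static u32 example_get_fdo_pwm_duty(struct amdgpu_device *adev)
    {
            u32 status = RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS);

            return REG_GET_FIELD(status, CG_THERMAL_STATUS, FDO_PWM_DUTY);
    }
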
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 95c656d205ed..c84bd7b2cf59 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -44,6 +44,7 @@ struct kgd_mem;
enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_PREEMPT_TYPE_WAVEFRONT_SAVE
};
struct kfd_vm_fault_info {
@@ -298,6 +299,8 @@ struct kfd2kgd_calls {
void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
int *max_waves_per_cu);
+ void (*program_trap_handler_settings)(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e38b191c7b7c..bac15c466733 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -306,8 +306,8 @@ struct amd_pm_funcs {
/* export for sysfs */
void (*set_fan_control_mode)(void *handle, u32 mode);
u32 (*get_fan_control_mode)(void *handle);
- int (*set_fan_speed_percent)(void *handle, u32 speed);
- int (*get_fan_speed_percent)(void *handle, u32 *speed);
+ int (*set_fan_speed_pwm)(void *handle, u32 speed);
+ int (*get_fan_speed_pwm)(void *handle, u32 *speed);
int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 769f58d5ae1a..249cb0aeb5ae 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2005,10 +2005,10 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2094,14 +2094,19 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
}
- if (asic_type == CHIP_ARCTURUS) {
- /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ switch (asic_type) {
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
+		/* MI series cards do not support standalone mclk/socclk/fclk level setting */
if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
DEVICE_ATTR_IS(pp_dpm_socclk) ||
DEVICE_ATTR_IS(pp_dpm_fclk)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
+ break;
+ default:
+ break;
}
if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
@@ -2379,7 +2384,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return sprintf(buf, "%u\n", pwm_mode);
+ return sysfs_emit(buf, "%u\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
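
The sprintf()-to-sysfs_emit() conversions in this file all follow one pattern: sysfs_emit() knows the buffer sysfs hands in is a full page, verifies that buf is page-aligned, and clamps output to PAGE_SIZE, none of which raw sprintf() does. A minimal sketch of the idiom with a hypothetical attribute (sysfs_emit_at() is the variant used later in this patch for appending at an offset):

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            /* Returns the number of bytes written, or 0 if buf is not
             * the start of a page-aligned sysfs buffer. */
            return sysfs_emit(buf, "%u\n", 42);
    }
    static DEVICE_ATTR_RO(example);
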
@@ -2424,14 +2429,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%i\n", 0);
+ return sysfs_emit(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%i\n", 255);
+ return sysfs_emit(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
@@ -2469,10 +2474,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
return err;
}
- value = (value * 100) / 255;
-
- if (adev->powerplay.pp_funcs->set_fan_speed_percent)
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+ if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
+ err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
else
err = -EINVAL;
@@ -2504,8 +2507,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_percent)
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+ if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
+ err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
else
err = -EINVAL;
@@ -2515,9 +2518,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
if (err)
return err;
- speed = (speed * 255) / 100;
-
- return sprintf(buf, "%i\n", speed);
+ return sysfs_emit(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
@@ -2550,7 +2551,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%i\n", speed);
+ return sysfs_emit(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
@@ -2647,7 +2648,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%i\n", rpm);
+ return sysfs_emit(buf, "%i\n", rpm);
}
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
@@ -2729,7 +2730,7 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
+ return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
@@ -2899,7 +2900,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%i\n", 0);
+ return sysfs_emit(buf, "%i\n", 0);
}
@@ -3174,6 +3175,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable 0: Disable
*
+ * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces
+ * at the same time; the value written first will be overridden by the later one.
+ *
* hwmon interfaces for GPU clocks:
*
* - freq1_input: the gfx/compute clock in hertz
@@ -3349,13 +3353,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if (!is_support_sw_smu(adev)) {
/* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
(!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
(!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
@@ -3379,8 +3383,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if (!is_support_sw_smu(adev)) {
/* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
(!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
!adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index d03e6fa2bf1a..98f1b3d8c1d5 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -280,11 +280,11 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_fan_control_mode(adev) \
((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
+#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \
+ ((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
+#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \
+ ((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index c2c201b8e3cf..8156729c370b 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -34,6 +34,8 @@
#define SMU_FW_NAME_LEN 0x24
#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
+#define SMU_CUSTOM_FAN_SPEED_RPM (1 << 1)
+#define SMU_CUSTOM_FAN_SPEED_PWM (1 << 2)
// Power Throttlers
#define SMU_THROTTLER_PPT0_BIT 0
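
The two new flag bits record which manual fan setting, if either, the user programmed last, so a restore path can re-apply it together with the new fan_speed_pwm/fan_speed_rpm fields. A hedged sketch of such a consumer (the helper name is hypothetical; the setters are the smu_v11_0 ones declared later in this patch):

    /* Hypothetical restore helper: re-apply the user's manual fan choice. */
    static void example_restore_fan(struct smu_context *smu)
    {
            struct smu_user_dpm_profile *p = &smu->user_dpm_profile;

            if (p->flags & SMU_CUSTOM_FAN_SPEED_RPM)
                    smu_v11_0_set_fan_speed_rpm(smu, p->fan_speed_rpm);
            else if (p->flags & SMU_CUSTOM_FAN_SPEED_PWM)
                    smu_v11_0_set_fan_speed_pwm(smu, p->fan_speed_pwm);
    }
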
@@ -229,7 +231,8 @@ enum smu_memory_pool_size
struct smu_user_dpm_profile {
uint32_t fan_mode;
uint32_t power_limit;
- uint32_t fan_speed_percent;
+ uint32_t fan_speed_pwm;
+ uint32_t fan_speed_rpm;
uint32_t flags;
uint32_t user_od;
@@ -540,7 +543,7 @@ struct smu_context
struct work_struct interrupt_work;
unsigned fan_max_rpm;
- unsigned manual_fan_speed_percent;
+ unsigned manual_fan_speed_pwm;
uint32_t gfx_default_hard_min_freq;
uint32_t gfx_default_soft_max_freq;
@@ -722,9 +725,14 @@ struct pptable_funcs {
bool (*is_dpm_running)(struct smu_context *smu);
/**
- * @get_fan_speed_percent: Get the current fan speed in percent.
+ * @get_fan_speed_pwm: Get the current fan speed in PWM.
*/
- int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+ int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);
+
+ /**
+ * @get_fan_speed_rpm: Get the current fan speed in rpm.
+ */
+ int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
/**
* @set_watermarks_table: Configure and upload the watermarks tables to
@@ -1043,9 +1051,14 @@ struct pptable_funcs {
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
/**
- * @set_fan_speed_percent: Set a static fan speed in percent.
+ * @set_fan_speed_pwm: Set a static fan speed in PWM.
+ */
+ int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);
+
+ /**
+ * @set_fan_speed_rpm: Set a static fan speed in rpm.
*/
- int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
+ int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
/**
* @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
@@ -1322,6 +1335,30 @@ enum smu_cmn2asic_mapping_type {
#define WORKLOAD_MAP(profile, workload) \
[profile] = {1, (workload)}
+/**
+ * smu_memcpy_trailing - Copy the end of one structure into the middle of another
+ *
+ * @dst: Pointer to destination struct
+ * @first_dst_member: The member name in @dst where the overwrite begins
+ * @last_dst_member: The member name in @dst where the overwrite ends (inclusive)
+ * @src: Pointer to the source struct
+ * @first_src_member: The member name in @src where the copy begins
+ *
+ */
+#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \
+ src, first_src_member) \
+({ \
+ size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
+ size_t __src_size = sizeof(*(src)) - __src_offset; \
+ size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
+ size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
+ __dst_offset; \
+ BUILD_BUG_ON(__src_size != __dst_size); \
+ __builtin_memcpy((u8 *)(dst) + __dst_offset, \
+ (u8 *)(src) + __src_offset, \
+ __dst_size); \
+})
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
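
In use, the spans named on both sides of smu_memcpy_trailing() must cover identical byte counts, which the BUILD_BUG_ON enforces at compile time. A toy example with hypothetical structs, where the firmware table's trailing members overwrite the matching members in the middle of the driver table:

    struct fw_table  { u32 rev; u32 min_clk; u32 max_clk; };            /* hypothetical */
    struct drv_table { u32 rev; u32 flags; u32 min_clk; u32 max_clk; }; /* hypothetical */

    static void example_copy(struct drv_table *drv, struct fw_table *fw)
    {
            /* Copies fw->min_clk through the end of *fw (8 bytes) over
             * drv->min_clk..drv->max_clk (also 8 bytes), so the
             * BUILD_BUG_ON passes. */
            smu_memcpy_trailing(drv, min_clk, max_clk, fw, min_clk);
    }
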
diff --git a/drivers/gpu/drm/amd/pm/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/inc/hwmgr.h
index 490371bd2520..8ed01071fe5a 100644
--- a/drivers/gpu/drm/amd/pm/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/inc/hwmgr.h
@@ -278,9 +278,9 @@ struct pp_hwmgr_func {
int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
- int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent);
- int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed);
- int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent);
+ int (*set_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t speed);
+ int (*get_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
+ int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t speed);
int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr);
int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 6239c30fcd5f..6f1b1b50d527 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -298,7 +298,6 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(DS_FCLK), \
__SMU_DUMMY_MAP(DS_MP1CLK), \
__SMU_DUMMY_MAP(DS_MP0CLK), \
- __SMU_DUMMY_MAP(XGMI), \
__SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \
__SMU_DUMMY_MAP(DPM_GFX_PACE), \
__SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 403bc1bf8a77..cbdae8a2c698 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -221,9 +221,18 @@ int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
-int smu_v11_0_set_fan_speed_percent(struct smu_context *smu,
+int smu_v11_0_set_fan_speed_pwm(struct smu_context *smu,
uint32_t speed);
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed);
+
+int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed);
+
+int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed);
+
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index d2a38246a78a..321215003643 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -533,7 +533,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
return mode;
}
-static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
+static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -541,17 +541,17 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
+ if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
+ ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
-static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
+static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -559,13 +559,13 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
+ if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
+ ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1691,8 +1691,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
- .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
- .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+ .set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
+ .get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
.get_pp_num_states = pp_dpm_get_pp_num_states,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 7dd92bdf7ffb..1de3ae77e03e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1036,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
i = 1;
- size += sprintf(buf + size, "0: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
data->gfx_min_freq_limit/100,
i == 0 ? "*" : "");
- size += sprintf(buf + size, "1: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
- size += sprintf(buf + size, "2: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
data->gfx_max_freq_limit/100,
i == 2 ? "*" : "");
break;
@@ -1050,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)
@@ -1065,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size = sprintf(buf, "%s:\n", "OD_SCLK");
- size += sprintf(buf + size, "0: %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
@@ -1081,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size = sprintf(buf, "%s:\n", "OD_RANGE");
- size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
break;
@@ -1456,11 +1456,11 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+ size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
title[1], title[2], title[3], title[4], title[5]);
for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
- size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+ size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 0541bfc81c1b..e7803ce8f67a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -27,6 +27,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
+#if IS_ENABLED(CONFIG_X86_64)
+#include <asm/intel-family.h>
+#endif
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
@@ -1733,6 +1736,17 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
+static bool intel_core_rkl_chk(void)
+{
+#if IS_ENABLED(CONFIG_X86_64)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
+#else
+ return false;
+#endif
+}
+
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1758,7 +1772,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
- data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+ data->pcie_dpm_key_disabled =
+ intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
/* need to set voltage control types before EVV patching */
data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
@@ -3212,7 +3227,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
if (!ret) {
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
}
@@ -4896,8 +4911,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
+ int size = 0;
+ uint32_t i, now, clock, pcie_speed;
switch (type) {
case PP_SCLK:
@@ -4911,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4926,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4940,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@@ -4948,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
- size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
i, odn_sclk_table->entries[i].clock/100,
odn_sclk_table->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_MCLK");
+ size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
- size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
i, odn_mclk_table->entries[i].clock/100,
odn_mclk_table->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_RANGE");
- size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
- size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
- size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}
@@ -4988,7 +5003,7 @@ static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
switch (mode) {
case AMD_FAN_CTRL_NONE:
- smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
break;
case AMD_FAN_CTRL_MANUAL:
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -5503,7 +5518,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
+ size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
title[0], title[1], title[2], title[3],
title[4], title[5], title[6], title[7]);
@@ -5511,7 +5526,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i < len; i++) {
if (i == hwmgr->power_profile_mode) {
- size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
+ size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
i, profile_name[i], "*",
data->current_profile_setting.sclk_up_hyst,
data->current_profile_setting.sclk_down_hyst,
@@ -5522,21 +5537,21 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
continue;
}
if (smu7_profiling[i].bupdate_sclk)
- size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
+ size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
smu7_profiling[i].sclk_down_hyst,
smu7_profiling[i].sclk_activity);
else
- size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
+ size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
i, profile_name[i], "-", "-", "-");
if (smu7_profiling[i].bupdate_mclk)
- size += sprintf(buf + size, "%16d %16d %16d\n",
+ size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
smu7_profiling[i].mclk_up_hyst,
smu7_profiling[i].mclk_down_hyst,
smu7_profiling[i].mclk_activity);
else
- size += sprintf(buf + size, "%16s %16s %16s\n",
+ size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
"-", "-", "-");
}
@@ -5692,8 +5707,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
+ .get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm,
+ .set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm,
.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index 6cfe148ed45b..a6c3610db23e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -51,7 +51,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
return 0;
}
-int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
uint32_t duty100;
@@ -70,12 +70,9 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
return -EINVAL;
- tmp64 = (uint64_t)duty * 100;
+ tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
- *speed = (uint32_t)tmp64;
-
- if (*speed > 100)
- *speed = 100;
+ *speed = MIN((uint32_t)tmp64, 255);
return 0;
}
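
The read path now scales the raw duty cycle straight into the hwmon 0-255 PWM range rather than a 0-100 percentage. A worked sketch of the arithmetic, where duty100 is the full-scale duty the fan controller reports and the function name is hypothetical:

    /* e.g. duty == 50 with duty100 == 100 yields 50 * 255 / 100 == 127. */
    static u32 example_duty_to_pwm(u32 duty, u32 duty100)
    {
            u64 tmp64 = (u64)duty * 255;

            do_div(tmp64, duty100);              /* from asm/div64.h */
            return min_t(u32, (u32)tmp64, 255);  /* clamp, as MIN() above */
    }
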
@@ -199,12 +196,11 @@ int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
}
/**
- * smu7_fan_ctrl_set_fan_speed_percent - Set Fan Speed in percent.
+ * smu7_fan_ctrl_set_fan_speed_pwm - Set Fan Speed in PWM.
* @hwmgr: the address of the powerplay hardware manager.
- * @speed: is the percentage value (0% - 100%) to be set.
- * Exception: Fails is the 100% setting appears to be 0.
+ * @speed: is the pwm value (0 - 255) to be set.
*/
-int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
uint32_t duty100;
@@ -214,8 +210,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (speed > 100)
- speed = 100;
+ speed = MIN(speed, 255);
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
@@ -227,7 +222,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
+ do_div(tmp64, 255);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h
index 42c1ba0fad78..a386a437e1f0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h
@@ -41,10 +41,10 @@
extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index d425b02b1418..b94a77e4e714 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1547,7 +1547,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *sclk_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
- int i, now, size = 0;
+ uint32_t i, now;
+ int size = 0;
switch (type) {
case PP_SCLK:
@@ -1558,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_SCLK_INDEX);
for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;
@@ -1570,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_MCLK_INDEX);
for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 25979106fd25..c152a61ddd2c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -4199,7 +4199,7 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
switch (mode) {
case AMD_FAN_CTRL_NONE:
- vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ vega10_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
break;
case AMD_FAN_CTRL_MANUAL:
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
@@ -4553,13 +4553,13 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
return ret);
- size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
- size += sprintf(buf + size, "%-19s %-22s %s\n",
+ size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+ size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
output_title[0],
output_title[1],
output_title[2]);
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+ size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
ppfeature_name[i],
1ULL << i,
(features_enabled & (1ULL << i)) ? "Y" : "N");
@@ -4650,7 +4650,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
count = sclk_table->count;
for (i = 0; i < count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4661,7 +4661,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4672,7 +4672,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, soc_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4684,7 +4684,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, dcef_table->dpm_levels[i].value / 100,
(dcef_table->dpm_levels[i].value / 100 == now) ?
"*" : "");
@@ -4698,7 +4698,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
- size += sprintf(buf + size, "%d: %s %s %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -4717,34 +4717,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
- size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+ size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk / 100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_MCLK");
+ size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
- size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+ size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk/100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_RANGE");
- size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
- size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
- size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}
@@ -5112,21 +5112,28 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+ size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
title[1], title[2], title[3], title[4], title[5]);
for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
- size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+ size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
- size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
+ size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
data->custom_profile_mode[0], data->custom_profile_mode[1],
data->custom_profile_mode[2], data->custom_profile_mode[3]);
return size;
}
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (adev->pdev->device == 0x6860);
+}
+
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
}
out:
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ if (vega10_get_power_profile_mode_quirks(hwmgr))
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ 1 << power_profile_mode,
+ NULL);
+ else
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
NULL);
+
hwmgr->power_profile_mode = power_profile_mode;
return 0;
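
A worked example of the difference the quirk makes: with power_profile_mode == 3, the default path sends a workload mask of 1 << (3 - 1) == 0x4, while the quirked 0x6860 device gets 1 << 3 == 0x8.
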
@@ -5523,8 +5536,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.force_dpm_level = vega10_dpm_force_dpm_level,
.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
+ .get_fan_speed_pwm = vega10_fan_ctrl_get_fan_speed_pwm,
+ .set_fan_speed_pwm = vega10_fan_ctrl_set_fan_speed_pwm,
.reset_fan_speed_to_default =
vega10_fan_ctrl_reset_fan_speed_to_default,
.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 9b46b27bd30c..dad3e3741a4e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -64,7 +64,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
return 0;
}
-int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
uint32_t current_rpm;
@@ -78,11 +78,11 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM != 0)
- percent = current_rpm * 100 /
+ percent = current_rpm * 255 /
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM;
- *speed = percent > 100 ? 100 : percent;
+ *speed = MIN(percent, 255);
return 0;
}
@@ -241,12 +241,11 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
}
/**
- * vega10_fan_ctrl_set_fan_speed_percent - Set Fan Speed in percent.
+ * vega10_fan_ctrl_set_fan_speed_pwm - Set Fan Speed in PWM.
* @hwmgr: the address of the powerplay hardware manager.
- * @speed: is the percentage value (0% - 100%) to be set.
- * Exception: Fails is the 100% setting appears to be 0.
+ * @speed: is the pwm value (0 - 255) to be set.
*/
-int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega10_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
struct amdgpu_device *adev = hwmgr->adev;
@@ -257,8 +256,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (speed > 100)
- speed = 100;
+ speed = MIN(speed, 255);
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
@@ -270,7 +268,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
+ do_div(tmp64, 255);
duty = (uint32_t)tmp64;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
index 4a0ede7c1f07..6850a21a2991 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
@@ -54,12 +54,12 @@ extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info);
-extern int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
extern int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr,
uint32_t mode);
-extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega10_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t speed);
extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_ctrl_uninitialize_thermal_controller(
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index 29e0d1d4035a..8558718e15a8 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2146,13 +2146,13 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
return ret);
- size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
- size += sprintf(buf + size, "%-19s %-22s %s\n",
+ size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+ size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
output_title[0],
output_title[1],
output_title[2]);
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+ size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
ppfeature_name[i],
1ULL << i,
(features_enabled & (1ULL << i)) ? "Y" : "N");
@@ -2256,7 +2256,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get gfx clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@@ -2272,7 +2272,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get memory clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@@ -2290,7 +2290,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get soc clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
@@ -2308,7 +2308,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get dcef clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 0791309586c5..0cf39c1244b1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -2769,7 +2769,7 @@ static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
switch (mode) {
case AMD_FAN_CTRL_NONE:
- vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ vega20_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
break;
case AMD_FAN_CTRL_MANUAL:
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
@@ -3243,13 +3243,13 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
return ret);
- size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
- size += sprintf(buf + size, "%-19s %-22s %s\n",
+ size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+ size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
output_title[0],
output_title[1],
output_title[2]);
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+ size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
ppfeature_name[i],
1ULL << i,
(features_enabled & (1ULL << i)) ? "Y" : "N");
@@ -3372,13 +3372,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_sclks(hwmgr, &clocks)) {
- size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3390,13 +3390,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_memclocks(hwmgr, &clocks)) {
- size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3408,13 +3408,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_socclocks(hwmgr, &clocks)) {
- size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3426,7 +3426,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
for (i = 0; i < fclk_dpm_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, fclk_dpm_table->dpm_levels[i].value,
fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
break;
@@ -3438,13 +3438,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_dcefclocks(hwmgr, &clocks)) {
- size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3458,7 +3458,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
- size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -3479,18 +3479,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
- size += sprintf(buf + size, "0: %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
od_table->GfxclkFmin);
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
od_table->GfxclkFmax);
}
break;
case OD_MCLK:
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- size = sprintf(buf, "%s:\n", "OD_MCLK");
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
od_table->UclkFmax);
}
@@ -3503,14 +3503,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
- size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
- size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
od_table->GfxclkFreq1,
od_table->GfxclkVolt1 / VOLTAGE_SCALE);
- size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
od_table->GfxclkFreq2,
od_table->GfxclkVolt2 / VOLTAGE_SCALE);
- size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+ size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
od_table->GfxclkFreq3,
od_table->GfxclkVolt3 / VOLTAGE_SCALE);
}
@@ -3518,17 +3518,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_RANGE:
- size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
- size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
}
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@@ -3539,22 +3539,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
- size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
- size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
- size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
- size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
- size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
- size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
}
@@ -4003,7 +4003,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9], title[10]);
@@ -4016,10 +4016,10 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
"[GetPowerProfile] Failed to get activity monitor!",
return result);
- size += sprintf(buf + size, "%2d %14s%s:\n",
+ size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
0,
"GFXCLK",
@@ -4033,7 +4033,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
activity_monitor.Gfx_PD_Data_error_coeff,
activity_monitor.Gfx_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
1,
"SOCCLK",
@@ -4047,7 +4047,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
activity_monitor.Soc_PD_Data_error_coeff,
activity_monitor.Soc_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
"UCLK",
@@ -4061,7 +4061,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
activity_monitor.Mem_PD_Data_error_coeff,
activity_monitor.Mem_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
3,
"FCLK",
@@ -4409,8 +4409,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.register_irq_handlers = smu9_register_irq_handlers,
.disable_smc_firmware_ctf = vega20_thermal_disable_alert,
/* fan control related */
- .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
+ .get_fan_speed_pwm = vega20_fan_ctrl_get_fan_speed_pwm,
+ .set_fan_speed_pwm = vega20_fan_ctrl_set_fan_speed_pwm,
.get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
.get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
.set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index 269dd7e95a44..f4f4efdbda79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -114,26 +114,29 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
return 0;
}
-int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega20_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
- PPTable_t *pp_table = &(data->smc_state_table.pp_table);
- uint32_t current_rpm, percent = 0;
- int ret = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
- ret = vega20_get_current_rpm(hwmgr, &current_rpm);
- if (ret)
- return ret;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
- percent = current_rpm * 100 / pp_table->FanMaximumRpm;
+ if (!duty100)
+ return -EINVAL;
- *speed = percent > 100 ? 100 : percent;
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
return 0;
}
-int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega20_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
struct amdgpu_device *adev = hwmgr->adev;
@@ -141,8 +144,7 @@ int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t duty;
uint64_t tmp64;
- if (speed > 100)
- speed = 100;
+ speed = MIN(speed, 255);
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
@@ -154,7 +156,7 @@ int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
+ do_div(tmp64, 255);
duty = (uint32_t)tmp64;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
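The rewritten get path above stops estimating a percentage from RPM and instead reads the fan duty cycle directly, rescaling it to the hwmon pwm range of 0-255. A minimal sketch of that conversion, assuming duty100 is the register's encoding of 100% duty; demo_duty_to_pwm() is illustrative, not a driver function:

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/types.h>
#include <asm/div64.h>

static int demo_duty_to_pwm(u32 duty, u32 duty100, u32 *pwm)
{
	u64 tmp64;

	if (!duty100)		/* avoid dividing by a zeroed register */
		return -EINVAL;

	/* Rescale duty/duty100 onto 0-255, clamping rounding artifacts. */
	tmp64 = (u64)duty * 255;
	do_div(tmp64, duty100);
	*pwm = min_t(u32, (u32)tmp64, 255);
	return 0;
}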
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
index 2d1769bbd24e..b18d09cf761e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
@@ -56,9 +56,9 @@ extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
extern int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t speed);
-extern int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega20_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
-extern int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega20_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t speed);
extern int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index 15c0b8af376f..bdbbeb959c68 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
@@ -6539,7 +6539,7 @@ static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int si_dpm_get_fan_speed_percent(void *handle,
+static int si_dpm_get_fan_speed_pwm(void *handle,
u32 *speed)
{
u32 duty, duty100;
@@ -6555,17 +6555,14 @@ static int si_dpm_get_fan_speed_percent(void *handle,
if (duty100 == 0)
return -EINVAL;
- tmp64 = (u64)duty * 100;
+ tmp64 = (u64)duty * 255;
do_div(tmp64, duty100);
- *speed = (u32)tmp64;
-
- if (*speed > 100)
- *speed = 100;
+ *speed = MIN((u32)tmp64, 255);
return 0;
}
-static int si_dpm_set_fan_speed_percent(void *handle,
+static int si_dpm_set_fan_speed_pwm(void *handle,
u32 speed)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6580,7 +6577,7 @@ static int si_dpm_set_fan_speed_percent(void *handle,
if (si_pi->fan_is_controlled_by_smc)
return -EINVAL;
- if (speed > 100)
+ if (speed > 255)
return -EINVAL;
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
@@ -6589,7 +6586,7 @@ static int si_dpm_set_fan_speed_percent(void *handle,
return -EINVAL;
tmp64 = (u64)speed * duty100;
- do_div(tmp64, 100);
+ do_div(tmp64, 255);
duty = (u32)tmp64;
tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
@@ -8059,8 +8056,8 @@ static const struct amd_pm_funcs si_dpm_funcs = {
.vblank_too_short = &si_dpm_vblank_too_short,
.set_fan_control_mode = &si_dpm_set_fan_control_mode,
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
- .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
- .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+ .set_fan_speed_pwm = &si_dpm_set_fan_speed_pwm,
+ .get_fan_speed_pwm = &si_dpm_get_fan_speed_pwm,
.check_state_equal = &si_check_state_equal,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.read_sensor = &si_dpm_read_sensor,
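The si_dpm set path is the inverse mapping: validate the requested pwm value against 255, scale it by duty100, and program the static duty field. A sketch under the same assumptions and headers as the previous snippet (demo_pwm_to_duty() is illustrative):

static u32 demo_pwm_to_duty(u32 pwm, u32 duty100)
{
	u64 tmp64 = (u64)min_t(u32, pwm, 255) * duty100;

	/* Truncating division, matching the driver's do_div() usage. */
	do_div(tmp64, 255);
	return (u32)tmp64;
}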
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 71afc2d20b12..3ab1ce4d3419 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -58,7 +58,7 @@ static int smu_handle_task(struct smu_context *smu,
enum amd_pp_task task_id,
bool lock_needed);
static int smu_reset(struct smu_context *smu);
-static int smu_set_fan_speed_percent(void *handle, u32 speed);
+static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(struct smu_context *smu, int value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
@@ -403,17 +403,26 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
}
/* set the user dpm fan configurations */
- if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
+ if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
+ smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
if (ret) {
+ smu->user_dpm_profile.fan_speed_pwm = 0;
+ smu->user_dpm_profile.fan_speed_rpm = 0;
+ smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
- return;
}
- if (!ret && smu->user_dpm_profile.fan_speed_percent) {
- ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
+ if (smu->user_dpm_profile.fan_speed_pwm) {
+ ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
if (ret)
- dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
+ dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
+ }
+
+ if (smu->user_dpm_profile.fan_speed_rpm) {
+ ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
}
}
@@ -620,6 +629,7 @@ static int smu_early_init(void *handle)
mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
+ smu->user_dpm_profile.fan_mode = -1;
adev->powerplay.pp_handle = smu;
adev->powerplay.pp_funcs = &swsmu_pm_funcs;
@@ -2179,7 +2189,6 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
struct smu_context *smu = handle;
- u32 percent;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2187,11 +2196,16 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_percent) {
- percent = speed * 100 / smu->fan_max_rpm;
- ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.fan_speed_percent = percent;
+ if (smu->ppt_funcs->set_fan_speed_rpm) {
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+ if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
+ smu->user_dpm_profile.fan_speed_rpm = speed;
+
+ /* Override the custom PWM setting, as the two cannot coexist */
+ smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
+ smu->user_dpm_profile.fan_speed_pwm = 0;
+ }
}
mutex_unlock(&smu->mutex);
@@ -2551,8 +2565,11 @@ static int smu_set_fan_control_mode(struct smu_context *smu, int value)
/* reset user dpm fan speed */
if (!ret && value != AMD_FAN_CTRL_MANUAL &&
- !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.fan_speed_percent = 0;
+ !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.fan_speed_pwm = 0;
+ smu->user_dpm_profile.fan_speed_rpm = 0;
+ smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+ }
return ret;
}
@@ -2565,31 +2582,25 @@ static void smu_pp_set_fan_control_mode(void *handle, u32 value)
}
-static int smu_get_fan_speed_percent(void *handle, u32 *speed)
+static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
struct smu_context *smu = handle;
int ret = 0;
- uint32_t percent;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_fan_speed_percent) {
- ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
- if (!ret) {
- *speed = percent > 100 ? 100 : percent;
- }
- }
+ if (smu->ppt_funcs->get_fan_speed_pwm)
+ ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
mutex_unlock(&smu->mutex);
-
return ret;
}
-static int smu_set_fan_speed_percent(void *handle, u32 speed)
+static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2599,12 +2610,16 @@ static int smu_set_fan_speed_percent(void *handle, u32 speed)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_percent) {
- if (speed > 100)
- speed = 100;
- ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.fan_speed_percent = speed;
+ if (smu->ppt_funcs->set_fan_speed_pwm) {
+ ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
+ if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
+ smu->user_dpm_profile.fan_speed_pwm = speed;
+
+ /* Override the custom RPM setting, as the two cannot coexist */
+ smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
+ smu->user_dpm_profile.fan_speed_rpm = 0;
+ }
}
mutex_unlock(&smu->mutex);
@@ -2616,17 +2631,14 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
struct smu_context *smu = handle;
int ret = 0;
- u32 percent;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_fan_speed_percent) {
- ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
- *speed = percent * smu->fan_max_rpm / 100;
- }
+ if (smu->ppt_funcs->get_fan_speed_rpm)
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
mutex_unlock(&smu->mutex);
@@ -3043,8 +3055,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
.set_fan_control_mode = smu_pp_set_fan_control_mode,
.get_fan_control_mode = smu_get_fan_control_mode,
- .set_fan_speed_percent = smu_set_fan_speed_percent,
- .get_fan_speed_percent = smu_get_fan_speed_percent,
+ .set_fan_speed_pwm = smu_set_fan_speed_pwm,
+ .get_fan_speed_pwm = smu_get_fan_speed_pwm,
.force_clock_level = smu_force_ppclk_levels,
.print_clock_levels = smu_print_ppclk_levels,
.force_performance_level = smu_force_performance_level,
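The amdgpu_smu.c changes make the custom PWM and RPM requests mutually exclusive: recording one clears the other, so the restore path replays only the most recent user setting. A compact sketch of that bookkeeping; the SMU_CUSTOM_* flags come from this series, while struct demo_fan_profile is a simplified stand-in for the user dpm profile:

struct demo_fan_profile {
	u32 flags;
	u32 fan_speed_pwm;	/* 0-255 */
	u32 fan_speed_rpm;
};

static void demo_record_custom_pwm(struct demo_fan_profile *p, u32 pwm)
{
	p->flags |= SMU_CUSTOM_FAN_SPEED_PWM;
	p->fan_speed_pwm = pwm;
	/* Custom PWM and RPM cannot coexist: drop the stale RPM request. */
	p->flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
	p->fan_speed_rpm = 0;
}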
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 6ec8492f71f5..e343cc218990 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -81,6 +81,24 @@
#define smnPCIE_ESM_CTRL 0x111003D0
+#define mmCG_FDO_CTRL0_ARCT 0x8B
+#define mmCG_FDO_CTRL0_ARCT_BASE_IDX 0
+
+#define mmCG_FDO_CTRL1_ARCT 0x8C
+#define mmCG_FDO_CTRL1_ARCT_BASE_IDX 0
+
+#define mmCG_FDO_CTRL2_ARCT 0x8D
+#define mmCG_FDO_CTRL2_ARCT_BASE_IDX 0
+
+#define mmCG_TACH_CTRL_ARCT 0x8E
+#define mmCG_TACH_CTRL_ARCT_BASE_IDX 0
+
+#define mmCG_TACH_STATUS_ARCT 0x8F
+#define mmCG_TACH_STATUS_ARCT_BASE_IDX 0
+
+#define mmCG_THERMAL_STATUS_ARCT 0x90
+#define mmCG_THERMAL_STATUS_ARCT_BASE_IDX 0
+
static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -163,14 +181,14 @@ static const struct cmn2asic_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT
FEA_MAP(DPM_SOCCLK),
FEA_MAP(DPM_FCLK),
FEA_MAP(DPM_MP0CLK),
- ARCTURUS_FEA_MAP(SMU_FEATURE_XGMI_BIT, FEATURE_DPM_XGMI_BIT),
+ FEA_MAP(DPM_XGMI),
FEA_MAP(DS_GFXCLK),
FEA_MAP(DS_SOCCLK),
FEA_MAP(DS_LCLK),
FEA_MAP(DS_FCLK),
FEA_MAP(DS_UCLK),
FEA_MAP(GFX_ULV),
- ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, FEATURE_DPM_VCN_BIT),
+ ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN_BIT),
FEA_MAP(RSMU_SMN_CG),
FEA_MAP(WAFL_CG),
FEA_MAP(PPT),
@@ -465,10 +483,8 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 6))
- memcpy(&smc_pptable->MaxVoltageStepGfx,
- &smc_dpm_table->maxvoltagestepgfx,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx));
-
+ smu_memcpy_trailing(smc_pptable, MaxVoltageStepGfx, BoardReserved,
+ smc_dpm_table, maxvoltagestepgfx);
return 0;
}
@@ -721,13 +737,13 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
member_type = METRICS_AVERAGE_SOCCLK;
break;
case PPCLK_VCLK:
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT))
member_type = METRICS_CURR_VCLK;
else
member_type = METRICS_AVERAGE_VCLK;
break;
case PPCLK_DCLK:
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT))
member_type = METRICS_CURR_DCLK;
else
member_type = METRICS_AVERAGE_DCLK;
@@ -756,7 +772,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
uint32_t gen_speed, lane_width;
if (amdgpu_ras_intr_triggered())
- return snprintf(buf, PAGE_SIZE, "unavailable\n");
+ return sysfs_emit(buf, "unavailable\n");
dpm_context = smu_dpm->dpm_context;
@@ -780,7 +796,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
* And it's safe to assume that is always the current clock.
*/
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
clocks.data[i].clocks_in_khz / 1000,
(clocks.num_levels == 1) ? "*" :
(arcturus_freqs_in_same_level(
@@ -803,7 +819,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.num_levels == 1) ? "*" :
(arcturus_freqs_in_same_level(
@@ -826,7 +842,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.num_levels == 1) ? "*" :
(arcturus_freqs_in_same_level(
@@ -849,7 +865,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < single_dpm_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
(clocks.num_levels == 1) ? "*" :
(arcturus_freqs_in_same_level(
@@ -872,7 +888,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < single_dpm_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
(clocks.num_levels == 1) ? "*" :
(arcturus_freqs_in_same_level(
@@ -895,7 +911,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < single_dpm_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
(clocks.num_levels == 1) ? "*" :
(arcturus_freqs_in_same_level(
@@ -906,7 +922,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
case SMU_PCIE:
gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
- size += sprintf(buf + size, "0: %s %s %dMhz *\n",
+ size += sysfs_emit_at(buf, size, "0: %s %s %dMhz *\n",
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -1162,11 +1178,29 @@ static int arcturus_read_sensor(struct smu_context *smu,
return ret;
}
-static int arcturus_get_fan_speed_percent(struct smu_context *smu,
- uint32_t *speed)
+static int arcturus_set_fan_static_mode(struct smu_context *smu,
+ uint32_t mode)
{
- int ret;
- u32 rpm;
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT),
+ CG_FDO_CTRL2, TMIN, 0));
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
+
+ return 0;
+}
+
+static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t crystal_clock_freq = 2500;
+ uint32_t tach_status;
+ uint64_t tmp64;
+ int ret = 0;
if (!speed)
return -EINVAL;
@@ -1175,14 +1209,112 @@ static int arcturus_get_fan_speed_percent(struct smu_context *smu,
case AMD_FAN_CTRL_AUTO:
ret = arcturus_get_smu_metrics_data(smu,
METRICS_CURR_FANSPEED,
- &rpm);
- if (!ret && smu->fan_max_rpm)
- *speed = rpm * 100 / smu->fan_max_rpm;
- return ret;
+ speed);
+ break;
default:
- *speed = smu->user_dpm_profile.fan_speed_percent;
+ /*
+ * For pre-Sienna Cichlid ASICs, a 0 RPM state may not be correctly
+ * detected via register reads. To work around this, report the fan
+ * speed as 0 RPM when that is exactly what the user requested.
+ */
+ if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
+ && !smu->user_dpm_profile.fan_speed_rpm) {
+ *speed = 0;
+ return 0;
+ }
+
+ tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
+ tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS_ARCT);
+ if (tach_status) {
+ do_div(tmp64, tach_status);
+ *speed = (uint32_t)tmp64;
+ } else {
+ *speed = 0;
+ }
+
+ break;
+ }
+
+ return ret;
+}
+
+static int arcturus_set_fan_speed_pwm(struct smu_context *smu,
+ uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ speed = MIN(speed, 255);
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1_ARCT),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 255);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0_ARCT,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0_ARCT),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return arcturus_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
+static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ /*
+ * The crystal_clock_freq used for the fan speed RPM calculation is
+ * always 25MHz, so hardcode it as 2500 (in units of 10KHz).
+ */
+ uint32_t crystal_clock_freq = 2500;
+ uint32_t tach_period;
+
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
+ CG_TACH_CTRL, TARGET_PERIOD,
+ tach_period));
+
+ return arcturus_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+}
+
+static int arcturus_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ /*
+ * For pre-Sienna Cichlid ASICs, a 0 RPM state may not be correctly
+ * detected via register reads. To work around this, report the fan
+ * speed as 0 PWM when that is exactly what the user requested.
+ */
+ if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
+ && !smu->user_dpm_profile.fan_speed_pwm) {
+ *speed = 0;
return 0;
}
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1_ARCT),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS_ARCT),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+ if (duty100) {
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
+ } else {
+ *speed = 0;
+ }
+
+ return 0;
}
static int arcturus_get_fan_parameters(struct smu_context *smu)
@@ -1272,11 +1404,11 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
return result;
if (smu_version >= 0x360d00)
- size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9], title[10]);
else
- size += sprintf(buf + size, "%16s\n",
+ size += sysfs_emit_at(buf, size, "%16s\n",
title[0]);
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -1302,11 +1434,11 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
}
}
- size += sprintf(buf + size, "%2d %14s%s\n",
+ size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
if (smu_version >= 0x360d00) {
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
0,
"GFXCLK",
@@ -1320,7 +1452,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
activity_monitor.Gfx_PD_Data_error_coeff,
activity_monitor.Gfx_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
1,
"UCLK",
@@ -1916,16 +2048,16 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
int ret = 0;
if (enable) {
- if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_DPM_BIT, 1);
if (ret) {
dev_err(smu->adev->dev, "[EnableVCNDPM] failed!\n");
return ret;
}
}
} else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_DPM_BIT, 0);
if (ret) {
dev_err(smu->adev->dev, "[DisableVCNDPM] failed!\n");
return ret;
@@ -2270,7 +2402,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.print_clk_levels = arcturus_print_clk_levels,
.force_clk_levels = arcturus_force_clk_levels,
.read_sensor = arcturus_read_sensor,
- .get_fan_speed_percent = arcturus_get_fan_speed_percent,
+ .get_fan_speed_pwm = arcturus_get_fan_speed_pwm,
+ .get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
.set_performance_level = arcturus_set_performance_level,
@@ -2315,7 +2448,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_pwm = arcturus_set_fan_speed_pwm,
+ .set_fan_speed_rpm = arcturus_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
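The new Arcturus fan helpers share one register idiom: read a THM register, rewrite a single field with REG_SET_FIELD(), and write the result back, using the mm*_ARCT offsets defined at the top of the file. A sketch of one such read-modify-write helper (illustrative; assumes the patch's register and field definitions are in scope):

static void demo_set_fdo_pwm_mode(struct amdgpu_device *adev, u32 mode)
{
	u32 val = RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT);

	/* Update only the FDO_PWM_MODE field, preserving all others. */
	val = REG_SET_FIELD(val, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT, val);
}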
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index d7722c229ddd..a5fc5d7cb6c7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -431,16 +431,16 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
switch (smc_dpm_table->table_header.content_revision) {
case 5: /* nv10 and nv14 */
- memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
- sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table, I2cControllers);
break;
case 7: /* nv12 */
ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table_v4_7);
if (ret)
return ret;
- memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
- sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table_v4_7, I2cControllers);
break;
default:
dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
@@ -1303,7 +1303,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
if (ret)
return size;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
}
} else {
@@ -1321,7 +1321,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
for (i = 0; i < 3; i++) {
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
i == mark_index ? "*" : "");
}
@@ -1331,7 +1331,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
for (i = 0; i < NUM_LINK_LEVELS; i++)
- size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
@@ -1352,23 +1352,24 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
break;
- size += sprintf(buf + size, "OD_SCLK:\n");
- size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ od_table->GfxclkFmin, od_table->GfxclkFmax);
break;
case SMU_OD_MCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
break;
- size += sprintf(buf + size, "OD_MCLK:\n");
- size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
+ size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+ size += sysfs_emit_at(buf, size, "1: %uMHz\n", od_table->UclkFmax);
break;
case SMU_OD_VDDC_CURVE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
break;
- size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+ size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
for (i = 0; i < 3; i++) {
switch (i) {
case 0:
@@ -1383,55 +1384,57 @@ static int navi10_print_clk_levels(struct smu_context *smu,
default:
break;
}
- size += sprintf(buf + size, "%d: %uMHz %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n",
+ i, curve_settings[0],
+ curve_settings[1] / NAVI10_VOLTAGE_SCALE);
}
break;
case SMU_OD_RANGE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
&min_value, NULL);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
NULL, &max_value);
- size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
&min_value, &max_value);
- size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
&min_value, &max_value);
- size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
- min_value, max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ min_value, max_value);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
&min_value, &max_value);
- size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
- min_value, max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ min_value, max_value);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
&min_value, &max_value);
- size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
- min_value, max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ min_value, max_value);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
&min_value, &max_value);
- size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
- min_value, max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ min_value, max_value);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
&min_value, &max_value);
- size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
- min_value, max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ min_value, max_value);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
&min_value, &max_value);
- size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
- min_value, max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ min_value, max_value);
}
break;
@@ -1668,27 +1671,27 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int navi10_get_fan_speed_percent(struct smu_context *smu,
- uint32_t *speed)
+static int navi10_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
{
- int ret;
- u32 rpm;
+ int ret = 0;
if (!speed)
return -EINVAL;
switch (smu_v11_0_get_fan_control_mode(smu)) {
case AMD_FAN_CTRL_AUTO:
- ret = navi1x_get_smu_metrics_data(smu,
+ ret = navi10_get_smu_metrics_data(smu,
METRICS_CURR_FANSPEED,
- &rpm);
- if (!ret && smu->fan_max_rpm)
- *speed = rpm * 100 / smu->fan_max_rpm;
- return ret;
+ speed);
+ break;
default:
- *speed = smu->user_dpm_profile.fan_speed_percent;
- return 0;
+ ret = smu_v11_0_get_fan_speed_rpm(smu,
+ speed);
+ break;
}
+
+ return ret;
}
static int navi10_get_fan_parameters(struct smu_context *smu)
@@ -1730,7 +1733,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9], title[10]);
@@ -1750,10 +1753,10 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
return result;
}
- size += sprintf(buf + size, "%2d %14s%s:\n",
+ size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
0,
"GFXCLK",
@@ -1767,7 +1770,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
activity_monitor.Gfx_PD_Data_error_coeff,
activity_monitor.Gfx_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
1,
"SOCCLK",
@@ -1781,7 +1784,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
activity_monitor.Soc_PD_Data_error_coeff,
activity_monitor.Soc_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
"MEMLK",
@@ -3224,7 +3227,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_config_changed = navi10_display_config_changed,
.notify_smc_display_config = navi10_notify_smc_display_config,
.is_dpm_running = navi10_is_dpm_running,
- .get_fan_speed_percent = navi10_get_fan_speed_percent,
+ .get_fan_speed_pwm = smu_v11_0_get_fan_speed_pwm,
+ .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
.get_power_profile_mode = navi10_get_power_profile_mode,
.set_power_profile_mode = navi10_set_power_profile_mode,
.set_watermarks_table = navi10_set_watermarks_table,
@@ -3267,7 +3271,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_pwm = smu_v11_0_set_fan_speed_pwm,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
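navi10_append_powerplay_table() above replaces open-coded sizeof/offsetof arithmetic with smu_memcpy_trailing(), which copies everything from a named member to the end of the source struct into the matching window of the destination struct. A rough sketch of the idea; demo_memcpy_trailing is an illustration of the concept, not the helper's actual definition:

#include <linux/build_bug.h>
#include <linux/stddef.h>	/* offsetof(), offsetofend() */
#include <linux/string.h>

#define demo_memcpy_trailing(dst, first_dst, last_dst, src, first_src)	\
do {									\
	size_t __src_off = offsetof(typeof(*(src)), first_src);	\
	size_t __src_sz  = sizeof(*(src)) - __src_off;			\
	size_t __dst_off = offsetof(typeof(*(dst)), first_dst);	\
	size_t __dst_sz  = offsetofend(typeof(*(dst)), last_dst) -	\
			   __dst_off;					\
	/* Both windows must be the same size, checked at compile time. */ \
	BUILD_BUG_ON(__src_sz != __dst_sz);				\
	memcpy((u8 *)(dst) + __dst_off, (u8 *)(src) + __src_off,	\
	       __dst_sz);						\
} while (0)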
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 261ef8ca862e..5e292c3f5050 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1088,7 +1088,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (ret)
goto print_clk_out;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
}
} else {
@@ -1110,7 +1110,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < count; i++) {
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
cur_value == freq_values[i] ? "*" : "");
}
@@ -1121,7 +1121,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
GET_PPTABLE_MEMBER(LclkFreq, &table_member);
for (i = 0; i < NUM_LINK_LEVELS; i++)
- size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
@@ -1144,8 +1144,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS))
break;
- size += sprintf(buf + size, "OD_SCLK:\n");
- size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
break;
case SMU_OD_MCLK:
@@ -1155,8 +1155,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS))
break;
- size += sprintf(buf + size, "OD_MCLK:\n");
- size += sprintf(buf + size, "0: %uMhz\n1: %uMHz\n", od_table->UclkFmin, od_table->UclkFmax);
+ size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", od_table->UclkFmin, od_table->UclkFmax);
break;
case SMU_OD_VDDGFX_OFFSET:
@@ -1172,22 +1172,22 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
(smu_version < 0x003a2900))
break;
- size += sprintf(buf + size, "OD_VDDGFX_OFFSET:\n");
- size += sprintf(buf + size, "%dmV\n", od_table->VddGfxOffset);
+ size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "%dmV\n", od_table->VddGfxOffset);
break;
case SMU_OD_RANGE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
&min_value, NULL);
sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMAX,
NULL, &max_value);
- size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
@@ -1196,7 +1196,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
&min_value, NULL);
sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMAX,
NULL, &max_value);
- size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
break;
@@ -1354,27 +1354,20 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu,
- uint32_t *speed)
+static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
{
- int ret;
- u32 rpm;
-
if (!speed)
return -EINVAL;
- switch (smu_v11_0_get_fan_control_mode(smu)) {
- case AMD_FAN_CTRL_AUTO:
- ret = sienna_cichlid_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- &rpm);
- if (!ret && smu->fan_max_rpm)
- *speed = rpm * 100 / smu->fan_max_rpm;
- return ret;
- default:
- *speed = smu->user_dpm_profile.fan_speed_percent;
- return 0;
- }
+ /*
+ * For Sienna Cichlid and later, the fan speed (in RPM) reported
+ * by the PMFW is always reliable (even when the fan control
+ * feature is disabled or 0 RPM kicks in).
+ */
+ return sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
}
static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
@@ -1419,7 +1412,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9], title[10]);
@@ -1439,10 +1432,10 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
return result;
}
- size += sprintf(buf + size, "%2d %14s%s:\n",
+ size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
0,
"GFXCLK",
@@ -1456,7 +1449,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
activity_monitor->Gfx_PD_Data_error_coeff,
activity_monitor->Gfx_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
1,
"SOCCLK",
@@ -1470,7 +1463,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
activity_monitor->Fclk_PD_Data_error_coeff,
activity_monitor->Fclk_PD_Data_error_rate_coeff);
- size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
"MEMLK",
@@ -3859,7 +3852,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_config_changed = sienna_cichlid_display_config_changed,
.notify_smc_display_config = sienna_cichlid_notify_smc_display_config,
.is_dpm_running = sienna_cichlid_is_dpm_running,
- .get_fan_speed_percent = sienna_cichlid_get_fan_speed_percent,
+ .get_fan_speed_pwm = smu_v11_0_get_fan_speed_pwm,
+ .get_fan_speed_rpm = sienna_cichlid_get_fan_speed_rpm,
.get_power_profile_mode = sienna_cichlid_get_power_profile_mode,
.set_power_profile_mode = sienna_cichlid_set_power_profile_mode,
.set_watermarks_table = sienna_cichlid_set_watermarks_table,
@@ -3902,7 +3896,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_pwm = smu_v11_0_set_fan_speed_pwm,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b5419e8eba89..87b055466a33 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1200,17 +1200,13 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
}
int
-smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
uint32_t duty100, duty;
uint64_t tmp64;
- if (speed > 100)
- speed = 100;
-
- if (smu_v11_0_auto_fan_control(smu, 0))
- return -EINVAL;
+ speed = MIN(speed, 255);
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -1218,7 +1214,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
+ do_div(tmp64, 255);
duty = (uint32_t)tmp64;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
@@ -1228,6 +1224,99 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ /*
+ * The crystal_clock_freq used for the fan speed RPM calculation is
+ * always 25MHz, so hardcode it as 2500 (in units of 10KHz).
+ */
+ uint32_t crystal_clock_freq = 2500;
+ uint32_t tach_period;
+
+ /*
+ * To prevent possible overheating, some ASICs impose a minimum
+ * fan speed:
+ * - For some NV10 SKU, the fan speed cannot be set lower than
+ * 700 RPM.
+ * - For some Sienna Cichlid SKU, the fan speed cannot be set
+ * lower than 500 RPM.
+ */
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+ CG_TACH_CTRL, TARGET_PERIOD,
+ tach_period));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+}
+
+int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ /*
+ * For pre-Sienna Cichlid ASICs, a 0 RPM state may not be correctly
+ * detected via register reads. To work around this, report the fan
+ * speed as 0 PWM when that is exactly what the user requested.
+ */
+ if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
+ && !smu->user_dpm_profile.fan_speed_pwm) {
+ *speed = 0;
+ return 0;
+ }
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
+
+ return 0;
+}
+
+int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t crystal_clock_freq = 2500;
+ uint32_t tach_status;
+ uint64_t tmp64;
+
+ /*
+ * For pre-Sienna Cichlid ASICs, a 0 RPM state may not be correctly
+ * detected via register reads. To work around this, report the fan
+ * speed as 0 RPM when that is exactly what the user requested.
+ */
+ if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
+ && !smu->user_dpm_profile.fan_speed_rpm) {
+ *speed = 0;
+ return 0;
+ }
+
+ tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
+
+ tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS);
+ if (tach_status) {
+ do_div(tmp64, tach_status);
+ *speed = (uint32_t)tmp64;
+ } else {
+ dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n");
+ *speed = 0;
+ }
+
+ return 0;
+}
+
int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
@@ -1236,7 +1325,9 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
switch (mode) {
case AMD_FAN_CTRL_NONE:
- ret = smu_v11_0_set_fan_speed_percent(smu, 100);
+ ret = smu_v11_0_auto_fan_control(smu, 0);
+ if (!ret)
+ ret = smu_v11_0_set_fan_speed_pwm(smu, 255);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
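The smu_v11_0 helpers above convert between RPM and the tachometer period directly: the crystal clock feeding the tach counter is fixed at 25MHz (stored as 2500, in units of 10KHz), the set path programs 60 * crystal_clock_freq * 10000 / (8 * rpm) as the target period, and the get path divides the same constant by the raw tach status. A sketch of both directions mirroring the patch's arithmetic (demo_* names are illustrative):

static u32 demo_rpm_to_tach_period(u32 rpm)
{
	const u32 crystal_clock_freq = 2500;	/* 25MHz in 10KHz units */

	/* Mirrors smu_v11_0_set_fan_speed_rpm(); caller ensures rpm > 0. */
	return 60 * crystal_clock_freq * 10000 / (8 * rpm);
}

static u32 demo_tach_status_to_rpm(u32 tach_status)
{
	u64 tmp64 = (u64)2500 * 60 * 10000;	/* same crystal constant */

	if (!tach_status)	/* fan stopped or tach not reporting */
		return 0;
	/* Mirrors smu_v11_0_get_fan_speed_rpm(). */
	do_div(tmp64, tach_status);
	return (u32)tmp64;
}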
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index bcaaa086fc2f..3a3421452e57 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -592,28 +592,28 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
- size += sprintf(buf + size, "0: %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
}
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
- size += sprintf(buf + size, "0: %10uMhz\n",
+ size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
}
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sprintf(buf, "%s:\n", "OD_RANGE");
- size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
- size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
}
break;
@@ -656,14 +656,14 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
return ret;
if (!value)
continue;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
if (cur_value == value)
cur_value_match_level = true;
}
if (!cur_value_match_level)
- size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
break;
default:
break;
@@ -691,28 +691,28 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
- size += sprintf(buf + size, "0: %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
}
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
- size += sprintf(buf + size, "0: %10uMhz\n",
+ size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
}
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sprintf(buf, "%s:\n", "OD_RANGE");
- size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
- size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
}
break;
@@ -755,14 +755,14 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return ret;
if (!value)
continue;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
if (cur_value == value)
cur_value_match_level = true;
}
if (!cur_value_match_level)
- size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
break;
default:
break;
@@ -1035,7 +1035,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
continue;
- size += sprintf(buf + size, "%2d %14s%s\n",
+ size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
@@ -1869,7 +1869,7 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
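
The sprintf()-to-sysfs_emit() conversions here and throughout the series share one motive: sysfs_emit() and sysfs_emit_at() know a sysfs show() buffer is exactly one page and clamp output at PAGE_SIZE, where sprintf(buf + size, ...) writes unchecked. A minimal sketch of the resulting pattern, with invented clock values that are not from this driver:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t clk_levels_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	static const unsigned int levels[] = { 200, 400, 800 };
	int size, i;

	/* sysfs_emit() starts a fresh page-backed buffer at offset 0. */
	size = sysfs_emit(buf, "GFXCLK:\n");

	/* sysfs_emit_at() appends at an offset, clamped to PAGE_SIZE. */
	for (i = 0; i < ARRAY_SIZE(levels); i++)
		size += sysfs_emit_at(buf, size, "%d: %uMhz\n",
				      i, levels[i]);

	return size;
}

Both helpers also WARN when buf is not page-aligned, which catches callers that pass an offset pointer instead of using the _at variant.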
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9a9c24a6ec35..5aa175e12a78 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -426,7 +426,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
@@ -510,16 +510,16 @@ static int renoir_print_clk_levels(struct smu_context *smu,
0, &max);
if (ret)
return ret;
- size += sprintf(buf + size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
+ size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
}
break;
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
- size += sprintf(buf + size, "OD_SCLK\n");
- size += sprintf(buf + size, "0:%10uMhz\n", min);
- size += sprintf(buf + size, "1:%10uMhz\n", max);
+ size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+ size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+ size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
}
break;
case SMU_GFXCLK:
@@ -536,12 +536,12 @@ static int renoir_print_clk_levels(struct smu_context *smu,
else
i = 1;
- size += sprintf(buf + size, "0: %uMhz %s\n", min,
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
i == 0 ? "*" : "");
- size += sprintf(buf + size, "1: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
i == 1 ? cur_value : RENOIR_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
- size += sprintf(buf + size, "2: %uMhz %s\n", max,
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
}
return size;
@@ -588,14 +588,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
if (!value)
continue;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
if (cur_value == value)
cur_value_match_level = true;
}
if (!cur_value_match_level)
- size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
break;
default:
@@ -1118,7 +1118,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
continue;
- size += sprintf(buf + size, "%2d %14s%s\n",
+ size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 562783d0302f..ab652028e003 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -90,8 +90,8 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
- MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 0),
- MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 0),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
@@ -156,14 +156,14 @@ static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUN
ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK_BIT),
- ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_BIT, FEATURE_DPM_XGMI_BIT),
+ ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT, FEATURE_DS_UCLK_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT, FEATURE_GFX_SS_BIT),
- ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, FEATURE_DPM_VCN_BIT),
+ ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT, FEATURE_RSMU_SMN_CG_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT, FEATURE_WAFL_CG_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT_BIT),
@@ -409,9 +409,8 @@ static int aldebaran_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 10))
- memcpy(&smc_pptable->GfxMaxCurrent,
- &smc_dpm_table->GfxMaxCurrent,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent));
+ smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,
+ smc_dpm_table, GfxMaxCurrent);
return 0;
}
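
smu_memcpy_trailing() replaces the open-coded offsetof() arithmetic removed above. As a hedged sketch (the authoritative definition lives in amdgpu_smu.h and may differ in detail), a helper of this shape copies everything from a named member through the end of the source struct into the matching span of the destination, and turns mismatched spans into a build error rather than a silent overrun:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member,	\
			    src, first_src_member)			\
do {									\
	size_t __src_off = offsetof(typeof(*(src)), first_src_member);	\
	size_t __src_sz = sizeof(*(src)) - __src_off;			\
	size_t __dst_off = offsetof(typeof(*(dst)), first_dst_member);	\
	size_t __dst_sz =						\
		offsetofend(typeof(*(dst)), last_dst_member) - __dst_off;\
									\
	/* Reject the build if the two spans disagree in size. */	\
	BUILD_BUG_ON(__src_sz != __dst_sz);				\
	memcpy((u8 *)(dst) + __dst_off, (u8 *)(src) + __src_off,	\
	       __dst_sz);						\
} while (0)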
@@ -735,14 +734,14 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
uint32_t min_clk, max_clk;
if (amdgpu_ras_intr_triggered())
- return snprintf(buf, PAGE_SIZE, "unavailable\n");
+ return sysfs_emit(buf, "unavailable\n");
dpm_context = smu_dpm->dpm_context;
switch (type) {
case SMU_OD_SCLK:
- size = sprintf(buf, "%s:\n", "GFXCLK");
+ size = sysfs_emit(buf, "%s:\n", "GFXCLK");
fallthrough;
case SMU_SCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -779,8 +778,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
*/
if (display_levels == clocks.num_levels) {
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(
- buf + size, "%d: %uMhz %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
freq_values[i],
(clocks.num_levels == 1) ?
"*" :
@@ -790,14 +788,14 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
""));
} else {
for (i = 0; i < display_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
freq_values[i], i == 1 ? "*" : "");
}
break;
case SMU_OD_MCLK:
- size = sprintf(buf, "%s:\n", "MCLK");
+ size = sysfs_emit(buf, "%s:\n", "MCLK");
fallthrough;
case SMU_MCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
@@ -814,7 +812,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.num_levels == 1) ? "*" :
(aldebaran_freqs_in_same_level(
@@ -837,7 +835,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.num_levels == 1) ? "*" :
(aldebaran_freqs_in_same_level(
@@ -860,7 +858,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < single_dpm_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
(clocks.num_levels == 1) ? "*" :
(aldebaran_freqs_in_same_level(
@@ -883,7 +881,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < single_dpm_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
(clocks.num_levels == 1) ? "*" :
(aldebaran_freqs_in_same_level(
@@ -906,7 +904,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
}
for (i = 0; i < single_dpm_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
(clocks.num_levels == 1) ? "*" :
(aldebaran_freqs_in_same_level(
@@ -1194,8 +1192,19 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
uint32_t power_limit = 0;
int ret;
- if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
- return -EINVAL;
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (current_power_limit)
+ *current_power_limit = 0;
+ if (default_power_limit)
+ *default_power_limit = 0;
+ if (max_power_limit)
+ *max_power_limit = 0;
+
+ dev_warn(smu->adev->dev,
+ "PPT feature is not enabled, power values can't be fetched.");
+
+ return 0;
+ }
/* Valid power data is available only from primary die.
* For secondary die show the value as 0.
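
Returning 0 with zeroed outputs instead of -EINVAL keeps power-limit reads usable when the PPT feature is fused off or disabled; note that each out-parameter is optional and only written if the caller supplied it. Reduced to the bare pattern (names invented for the sketch):

#include <linux/types.h>

static int get_power_limits(u32 *cur, u32 *def, u32 *max,
			    bool ppt_enabled)
{
	if (!ppt_enabled) {
		/* Only write through pointers the caller supplied. */
		if (cur)
			*cur = 0;
		if (def)
			*def = 0;
		if (max)
			*max = 0;
		return 0;	/* degraded, but not an error */
	}

	/* ... otherwise query the firmware for the real limits ... */
	return 0;
}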
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a421ba85bd6d..a0e50f23b1dd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -85,6 +85,10 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
const struct common_firmware_header *header;
struct amdgpu_firmware_info *ucode = NULL;
+ /* SMU firmware doesn't need to be loaded in SR-IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_ALDEBARAN:
chip_name = "aldebaran";
@@ -268,52 +272,86 @@ static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
return 0;
}
-int smu_v13_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
- const struct smc_firmware_header_v1_0 *hdr;
- int ret, index;
- uint32_t size = 0;
uint16_t atom_table_size;
uint8_t frev, crev;
- void *table;
- uint16_t version_major, version_minor;
+ int ret, index;
+ dev_info(adev->dev, "use vbios provided pptable\n");
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
- if (amdgpu_smu_pptable_id >= 0) {
- smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id;
- dev_info(adev->dev, "override pptable id %d\n", amdgpu_smu_pptable_id);
- }
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+ (uint8_t **)table);
+ if (ret)
+ return ret;
+
+ if (size)
+ *size = atom_table_size;
+
+ return 0;
+}
+
+static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
+ uint32_t pptable_id)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t version_major, version_minor;
+ int ret;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ if (!hdr)
+ return -EINVAL;
+
+ dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
+
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
- if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
- dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
- switch (version_minor) {
- case 1:
- ret = smu_v13_0_set_pptable_v2_1(smu, &table, &size,
- smu->smu_table.boot_values.pp_table_id);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret)
- return ret;
+ if (version_major != 2) {
+ dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
+ version_major, version_minor);
+ return -EINVAL;
+ }
- } else {
- dev_info(adev->dev, "use vbios provided pptable\n");
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- powerplayinfo);
+ switch (version_minor) {
+ case 1:
+ ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
- ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
- (uint8_t **)&table);
- if (ret)
- return ret;
- size = atom_table_size;
+ return ret;
+}
+
+int smu_v13_0_setup_pptable(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t size = 0, pptable_id = 0;
+ void *table;
+ int ret = 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
}
+ /* force using vbios pptable in sriov mode */
+ if (amdgpu_sriov_vf(adev) || !pptable_id)
+ ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
+ else
+ ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+
+ if (ret)
+ return ret;
+
if (!smu->smu_table.power_play_table)
smu->smu_table.power_play_table = table;
if (!smu->smu_table.power_play_table_size)
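
The refactor separates where the pptable comes from (a VBIOS data table vs. the SMU firmware blob) from how that choice is made. The selection precedence, restated as an illustrative predicate: the amdgpu_smu_pptable_id module parameter overrides the VBIOS-reported id, and an id of 0 or an SR-IOV guest always means the VBIOS table.

#include <linux/types.h>

/* Illustrative restatement of the selection logic above. */
static bool use_vbios_pptable(bool is_sriov_vf, int module_override,
			      u32 vbios_pptable_id)
{
	u32 id = (module_override >= 0) ? (u32)module_override
					: vbios_pptable_id;

	return is_sriov_vf || id == 0;
}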
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 0cfeb9fc7c03..627ba2eec7fd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -572,7 +572,7 @@ static int yellow_carp_get_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
continue;
- size += sprintf(buf + size, "%2d %14s%s\n",
+ size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
@@ -731,7 +731,7 @@ static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
@@ -1054,15 +1054,15 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_SCLK:
- size = sprintf(buf, "%s:\n", "OD_SCLK");
- size += sprintf(buf + size, "0: %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sprintf(buf + size, "1: %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- size = sprintf(buf, "%s:\n", "OD_RANGE");
- size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
break;
case SMU_SOCCLK:
@@ -1083,7 +1083,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
if (ret)
goto print_clk_out;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
}
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 415be74df28c..66711ab24c15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -710,7 +710,7 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
return 0;
}
- size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+ size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
feature_mask[1], feature_mask[0]);
memset(sort_feature, -1, sizeof(sort_feature));
@@ -725,14 +725,14 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
sort_feature[feature_index] = i;
}
- size += sprintf(buf + size, "%-2s. %-20s %-3s : %-s\n",
+ size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
"No", "Feature", "Bit", "State");
for (i = 0; i < SMU_FEATURE_COUNT; i++) {
if (sort_feature[i] < 0)
continue;
- size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+ size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
count++,
smu_get_feature_name(smu, sort_feature[i]),
i,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index f0a07278ad04..7dcc6392792d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -468,17 +468,7 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .mmap = etnaviv_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(fops);
static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
@@ -487,7 +477,7 @@ static const struct drm_driver etnaviv_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
- .gem_prime_mmap = etnaviv_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
#endif
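
DEFINE_DRM_GEM_FOPS() (from include/drm/drm_gem.h) generates the boilerplate this deletes; to a first approximation the macro expands to:

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
};

The functional change is .mmap: drm_gem_mmap() resolves the GEM object behind the fake offset and dispatches to its drm_gem_object_funcs.mmap, which is why etnaviv_gem_mmap moves into etnaviv_gem_object_funcs further down.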
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 003288ebd896..049ae87de9be 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -47,12 +47,9 @@ struct etnaviv_drm_private {
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
-int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index b8fa6ed3dd73..8f1b5af47dd6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -130,8 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
{
pgprot_t vm_page_prot;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -154,19 +153,11 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
return 0;
}
-int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- struct etnaviv_gem_object *obj;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret) {
- DBG("mmap failed: %d", ret);
- return ret;
- }
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- obj = to_etnaviv_bo(vma->vm_private_data);
- return obj->ops->mmap(obj, vma);
+ return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
@@ -567,6 +558,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.unpin = etnaviv_gem_prime_unpin,
.get_sg_table = etnaviv_gem_prime_get_sg_table,
.vmap = etnaviv_gem_prime_vmap,
+ .mmap = etnaviv_gem_mmap,
.vm_ops = &vm_ops,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index d741b1d735f7..6d8bed9c739d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -34,19 +34,6 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return 0;
}
-int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
-}
-
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 4102bcea3341..c297fffe06eb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -613,6 +613,12 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
etnaviv_is_model_rev(gpu, GC2000, 0x5108))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+ /* Disable SE, RA and TX clock gating on affected core revisions. */
+ if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE |
+ VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA |
+ VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index dfc0f536b3b9..f2fc645c7956 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -39,6 +39,37 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
},
{
.model = 0x7000,
+ .revision = 0x6202,
+ .product_id = 0x70003,
+ .customer_id = 0,
+ .eco_id = 0,
+ .stream_count = 8,
+ .register_max = 64,
+ .thread_count = 512,
+ .shader_core_count = 2,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cad,
+ .minor_features0 = 0xc1489eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0x3b5ac333,
+ .minor_features6 = 0xfccee201,
+ .minor_features7 = 0x03fffa6f,
+ .minor_features8 = 0x00e10ef0,
+ .minor_features9 = 0x0088003c,
+ .minor_features10 = 0x00004040,
+ .minor_features11 = 0x00000024,
+ },
+ {
+ .model = 0x7000,
.revision = 0x6204,
.product_id = ~0U,
.customer_id = ~0U,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 0644936afee2..bf33c3084cb4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -115,6 +115,8 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
+ else
+ mapping = ERR_PTR(-ENODEV);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
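
The new else branch closes a hole: when neither IOMMU option takes an earlier branch, mapping was left uninitialized and the IS_ERR() check below read an indeterminate value. Seeding the fallthrough with ERR_PTR(-ENODEV) makes the no-backend case fail deterministically; schematically (helpers and flags invented for the sketch):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

static void *pick_mapping(void *arm_mapping, void *dma_domain,
			  bool have_arm_iommu, bool have_iommu_dma)
{
	void *mapping;

	if (have_arm_iommu)
		mapping = arm_mapping;
	else if (have_iommu_dma)
		mapping = dma_domain;
	else
		mapping = ERR_PTR(-ENODEV); /* never left uninitialized */

	return mapping;	/* caller tests IS_ERR() */
}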
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index cab4d2c370a7..b00230626c6a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -4,6 +4,7 @@
* Authors: Joonyoung Shim <jy0922.shim@samsung.com>
*/
+#include <linux/refcount.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
@@ -208,7 +209,7 @@ struct g2d_cmdlist_userptr {
struct page **pages;
unsigned int npages;
struct sg_table *sgt;
- atomic_t refcount;
+ refcount_t refcount;
bool in_pool;
bool out_of_list;
};
@@ -386,9 +387,9 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
if (force)
goto out;
- atomic_dec(&g2d_userptr->refcount);
+ refcount_dec(&g2d_userptr->refcount);
- if (atomic_read(&g2d_userptr->refcount) > 0)
+ if (refcount_read(&g2d_userptr->refcount) > 0)
return;
if (g2d_userptr->in_pool)
@@ -436,7 +437,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
* and different size.
*/
if (g2d_userptr->size == size) {
- atomic_inc(&g2d_userptr->refcount);
+ refcount_inc(&g2d_userptr->refcount);
*obj = g2d_userptr;
return &g2d_userptr->dma_addr;
@@ -461,7 +462,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
if (!g2d_userptr)
return ERR_PTR(-ENOMEM);
- atomic_set(&g2d_userptr->refcount, 1);
+ refcount_set(&g2d_userptr->refcount, 1);
g2d_userptr->size = size;
start = userptr & PAGE_MASK;
@@ -897,13 +898,14 @@ static void g2d_runqueue_worker(struct work_struct *work)
ret = pm_runtime_resume_and_get(g2d->dev);
if (ret < 0) {
dev_err(g2d->dev, "failed to enable G2D device.\n");
- return;
+ goto out;
}
g2d_dma_start(g2d, g2d->runqueue_node);
}
}
+out:
mutex_unlock(&g2d->runqueue_mutex);
}
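
The atomic_t to refcount_t conversion in this file buys checked semantics: refcount_inc() and refcount_dec() saturate and WARN on overflow or on manipulation of a counter that already reached zero, rather than wrapping silently. The common idiom, sketched with an invented object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct buf {
	refcount_t refcount;
	/* ... payload ... */
};

static struct buf *buf_get(struct buf *b)
{
	refcount_inc(&b->refcount);	/* WARNs if already zero */
	return b;
}

static void buf_put(struct buf *b)
{
	/* Drop the reference and free on the final put, atomically. */
	if (refcount_dec_and_test(&b->refcount))
		kfree(b);
}

g2d keeps a separate dec-then-read sequence because of its force-release path, but where possible the decrement and the zero test should be paired with refcount_dec_and_test() as above.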
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b25c596f6f7e..a35424133f1b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2226,6 +2226,9 @@ static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_stat
if (crtc_state->has_psr2)
return false;
+ if (crtc_state->splitter.enable)
+ return false;
+
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index 51dbe0e3294e..d2969f68dd64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -6,7 +6,7 @@
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
-#include <stddef.h>
+#include <linux/stddef.h>
struct intel_engine_cs;
struct intel_gt;
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index d0a7c934fd3b..1dac21aa7e5c 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -177,7 +177,7 @@ static struct intel_context *pinned_context(struct intel_gt *gt)
ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
I915_GEM_HWS_MIGRATE,
&key, "migrate");
- i915_vm_put(ce->vm);
+ i915_vm_put(vm);
return ce;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 08f011f893b2..2c1ed32ca5ac 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -462,7 +462,7 @@ static int igt_reset_nop_engine(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
- pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+ pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
return PTR_ERR(ce);
}
@@ -577,7 +577,7 @@ static int igt_reset_fail_engine(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
- pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+ pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
return PTR_ERR(ce);
}
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index c578ea8f56a0..d8b4482c69d0 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -47,19 +47,30 @@ static const struct {
int (*init)(void);
void (*exit)(void);
} init_funcs[] = {
- { i915_check_nomodeset, NULL },
- { i915_active_module_init, i915_active_module_exit },
- { i915_buddy_module_init, i915_buddy_module_exit },
- { i915_context_module_init, i915_context_module_exit },
- { i915_gem_context_module_init, i915_gem_context_module_exit },
- { i915_objects_module_init, i915_objects_module_exit },
- { i915_request_module_init, i915_request_module_exit },
- { i915_scheduler_module_init, i915_scheduler_module_exit },
- { i915_vma_module_init, i915_vma_module_exit },
- { i915_mock_selftests, NULL },
- { i915_pmu_init, i915_pmu_exit },
- { i915_register_pci_driver, i915_unregister_pci_driver },
- { i915_perf_sysctl_register, i915_perf_sysctl_unregister },
+ { .init = i915_check_nomodeset },
+ { .init = i915_active_module_init,
+ .exit = i915_active_module_exit },
+ { .init = i915_buddy_module_init,
+ .exit = i915_buddy_module_exit },
+ { .init = i915_context_module_init,
+ .exit = i915_context_module_exit },
+ { .init = i915_gem_context_module_init,
+ .exit = i915_gem_context_module_exit },
+ { .init = i915_objects_module_init,
+ .exit = i915_objects_module_exit },
+ { .init = i915_request_module_init,
+ .exit = i915_request_module_exit },
+ { .init = i915_scheduler_module_init,
+ .exit = i915_scheduler_module_exit },
+ { .init = i915_vma_module_init,
+ .exit = i915_vma_module_exit },
+ { .init = i915_mock_selftests },
+ { .init = i915_pmu_init,
+ .exit = i915_pmu_exit },
+ { .init = i915_register_pci_driver,
+ .exit = i915_unregister_pci_driver },
+ { .init = i915_perf_sysctl_register,
+ .exit = i915_perf_sysctl_unregister },
};
static int init_progress;
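
Designated initializers make the optional .exit slot explicit: omitted members are zero-initialized, so stages without teardown leave .exit NULL, and adding a struct member can no longer silently shift the positional columns. The shape, reduced to essentials with invented names:

struct stage {
	int  (*init)(void);
	void (*exit)(void);
};

static int  init_a(void) { return 0; }
static void exit_a(void) { }
static int  init_b(void) { return 0; }	/* nothing to undo */

static const struct stage stages[] = {
	{ .init = init_a, .exit = exit_a },
	{ .init = init_b },		/* .exit implicitly NULL */
};

On a failed init, the unwind loop calls .exit on each earlier stage that defines one; the init_progress counter below tracks how far to walk back.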
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index dc54a7a69005..29098d7c8307 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-mediatek-drm-y := mtk_disp_ccorr.o \
+mediatek-drm-y := mtk_disp_aal.o \
+ mtk_disp_ccorr.o \
mtk_disp_color.o \
mtk_disp_gamma.o \
mtk_disp_ovl.o \
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
new file mode 100644
index 000000000000..64b45284766a
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_AAL_EN 0x0000
+#define AAL_EN BIT(0)
+#define DISP_AAL_SIZE 0x0030
+
+struct mtk_disp_aal_data {
+ bool has_gamma;
+};
+
+/**
+ * struct mtk_disp_aal - DISP_AAL driver structure
+ * @clk: engine clock for the AAL hardware block
+ * @regs: mapped MMIO registers of the AAL block
+ * @cmdq_reg: CMDQ client register description for this block
+ * @data: platform data selecting optional features such as gamma
+ */
+struct mtk_disp_aal {
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
+ const struct mtk_disp_aal_data *data;
+};
+
+int mtk_aal_clk_enable(struct device *dev)
+{
+ struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(aal->clk);
+}
+
+void mtk_aal_clk_disable(struct device *dev)
+{
+ struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aal->clk);
+}
+
+void mtk_aal_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &aal->cmdq_reg, aal->regs, DISP_AAL_SIZE);
+}
+
+void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
+{
+ struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+
+ if (aal->data && aal->data->has_gamma)
+ mtk_gamma_set_common(aal->regs, state);
+}
+
+void mtk_aal_start(struct device *dev)
+{
+ struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+
+ writel(AAL_EN, aal->regs + DISP_AAL_EN);
+}
+
+void mtk_aal_stop(struct device *dev)
+{
+ struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, aal->regs + DISP_AAL_EN);
+}
+
+static int mtk_disp_aal_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ return 0;
+}
+
+static void mtk_disp_aal_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+}
+
+static const struct component_ops mtk_disp_aal_component_ops = {
+ .bind = mtk_disp_aal_bind,
+ .unbind = mtk_disp_aal_unbind,
+};
+
+static int mtk_disp_aal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_aal *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get aal clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap aal\n");
+ return PTR_ERR(priv->regs);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ priv->data = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_aal_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_aal_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_aal_component_ops);
+
+ return 0;
+}
+
+static const struct mtk_disp_aal_data mt8173_aal_driver_data = {
+ .has_gamma = true,
+};
+
+static const struct of_device_id mtk_disp_aal_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8173-disp-aal",
+ .data = &mt8173_aal_driver_data},
+ { .compatible = "mediatek,mt8183-disp-aal"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match);
+
+struct platform_driver mtk_disp_aal_driver = {
+ .probe = mtk_disp_aal_probe,
+ .remove = mtk_disp_aal_remove,
+ .driver = {
+ .name = "mediatek-disp-aal",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_aal_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index cafd9df2d63b..86c3068894b1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -9,6 +9,15 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_plane.h"
+int mtk_aal_clk_enable(struct device *dev);
+void mtk_aal_clk_disable(struct device *dev);
+void mtk_aal_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state);
+void mtk_aal_start(struct device *dev);
+void mtk_aal_stop(struct device *dev);
+
void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state);
int mtk_ccorr_clk_enable(struct device *dev);
void mtk_ccorr_clk_disable(struct device *dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 705f28ceb4dd..75d7f45579e2 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -162,10 +162,10 @@ void mtk_rdma_config(struct device *dev, unsigned int width,
/*
* Enable FIFO underflow since DSI and DPI can't be blocked.
* Keep the FIFO pseudo size reset default of 8 KiB. Set the
- * output threshold to 6 microseconds with 7/6 overhead to
- * account for blanking, and with a pixel depth of 4 bytes:
+ * output threshold to 70% of the maximum FIFO size to make
+ * sure the threshold will not overflow.
*/
- threshold = width * height * vrefresh * 4 * 7 / 1000000;
+ threshold = rdma_fifo_size * 7 / 10;
reg = RDMA_FIFO_UNDERFLOW_EN |
RDMA_FIFO_PSEUDO_SIZE(rdma_fifo_size) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 474efb844249..c5fb8fa914b0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -4,6 +4,8 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
@@ -50,8 +52,11 @@ struct mtk_drm_crtc {
bool pending_async_planes;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_client *cmdq_client;
+ struct mbox_client cmdq_cl;
+ struct mbox_chan *cmdq_chan;
+ struct cmdq_pkt cmdq_handle;
u32 cmdq_event;
+ u32 cmdq_vblank_cnt;
#endif
struct device *mmsys_dev;
@@ -222,9 +227,79 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static void ddp_cmdq_cb(struct cmdq_cb_data data)
+static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
+ size_t size)
{
- cmdq_pkt_destroy(data.data);
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return -ENOMEM;
+ }
+ pkt->buf_size = size;
+
+ dev = chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ kfree(pkt->va_base);
+ kfree(pkt);
+ return -ENOMEM;
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return 0;
+}
+
+static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
+{
+ dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ kfree(pkt);
+}
+
+static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+{
+ struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
+ struct cmdq_cb_data *data = mssg;
+ struct mtk_crtc_state *state;
+ unsigned int i;
+
+ state = to_mtk_crtc_state(mtk_crtc->base.state);
+
+ state->pending_config = false;
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ plane_state->pending.config = false;
+ }
+ mtk_crtc->pending_planes = false;
+ }
+
+ if (mtk_crtc->pending_async_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ plane_state->pending.async_config = false;
+ }
+ mtk_crtc->pending_async_planes = false;
+ }
+
+ mtk_crtc->cmdq_vblank_cnt = 0;
+ mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
}
#endif
@@ -378,7 +453,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
state->pending_vrefresh, 0,
cmdq_handle);
- state->pending_config = false;
+ if (!cmdq_handle)
+ state->pending_config = false;
}
if (mtk_crtc->pending_planes) {
@@ -398,9 +474,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.config = false;
+ if (!cmdq_handle)
+ plane_state->pending.config = false;
}
- mtk_crtc->pending_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_planes = false;
}
if (mtk_crtc->pending_async_planes) {
@@ -420,9 +499,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.async_config = false;
+ if (!cmdq_handle)
+ plane_state->pending.async_config = false;
}
- mtk_crtc->pending_async_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_async_planes = false;
}
}
@@ -430,7 +512,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_pkt *cmdq_handle;
+ struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
struct drm_crtc *crtc = &mtk_crtc->base;
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -468,14 +550,24 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (mtk_crtc->cmdq_client) {
- mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
- cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ if (mtk_crtc->cmdq_chan) {
+ mbox_flush(mtk_crtc->cmdq_chan, 2000);
+ cmdq_handle->cmd_buf_size = 0;
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
- cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+ dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
+ cmdq_handle->pa_base,
+ cmdq_handle->cmd_buf_size,
+ DMA_TO_DEVICE);
+ /*
+ * The CMDQ command should execute in the next vblank.
+ * If it fails to execute within the next two vblanks, a
+ * timeout has occurred.
+ */
+ mtk_crtc->cmdq_vblank_cnt = 2;
+ mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
+ mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
}
#endif
mtk_crtc->config_updating = false;
@@ -489,12 +581,15 @@ static void mtk_crtc_ddp_irq(void *data)
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
+ mtk_crtc_ddp_config(crtc, NULL);
+ else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
+ DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
+ drm_crtc_index(&mtk_crtc->base));
#else
if (!priv->data->shadow_register)
-#endif
mtk_crtc_ddp_config(crtc, NULL);
-
+#endif
mtk_drm_finish_page_flip(mtk_crtc);
}
@@ -755,14 +850,22 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
for (i = 0; i < path_len; i++) {
enum mtk_ddp_comp_id comp_id = path[i];
struct device_node *node;
+ struct mtk_ddp_comp *comp;
node = priv->comp_node[comp_id];
+ comp = &priv->ddp_comp[comp_id];
+
if (!node) {
dev_info(dev,
"Not creating crtc %d because component %d is disabled or missing\n",
pipe, comp_id);
return 0;
}
+
+ if (!comp->dev) {
+ dev_err(dev, "Component %pOF not initialized\n", node);
+ return -ENODEV;
+ }
}
mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
@@ -787,16 +890,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
enum mtk_ddp_comp_id comp_id = path[i];
struct mtk_ddp_comp *comp;
- struct device_node *node;
- node = priv->comp_node[comp_id];
comp = &priv->ddp_comp[comp_id];
- if (!comp) {
- dev_err(dev, "Component %pOF not initialized\n", node);
- ret = -ENODEV;
- return ret;
- }
-
mtk_crtc->ddp_comp[i] = comp;
if (comp->funcs) {
@@ -832,16 +927,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mutex_init(&mtk_crtc->hw_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_crtc->cmdq_client =
- cmdq_mbox_create(mtk_crtc->mmsys_dev,
- drm_crtc_index(&mtk_crtc->base));
- if (IS_ERR(mtk_crtc->cmdq_client)) {
+ mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
+ mtk_crtc->cmdq_cl.tx_block = false;
+ mtk_crtc->cmdq_cl.knows_txdone = true;
+ mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
+ mtk_crtc->cmdq_chan =
+ mbox_request_channel(&mtk_crtc->cmdq_cl,
+ drm_crtc_index(&mtk_crtc->base));
+ if (IS_ERR(mtk_crtc->cmdq_chan)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
- mtk_crtc->cmdq_client = NULL;
+ mtk_crtc->cmdq_chan = NULL;
}
- if (mtk_crtc->cmdq_client) {
+ if (mtk_crtc->cmdq_chan) {
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
@@ -849,8 +948,18 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
drm_crtc_index(&mtk_crtc->base));
- cmdq_mbox_destroy(mtk_crtc->cmdq_client);
- mtk_crtc->cmdq_client = NULL;
+ mbox_free_channel(mtk_crtc->cmdq_chan);
+ mtk_crtc->cmdq_chan = NULL;
+ } else {
+ ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
+ &mtk_crtc->cmdq_handle,
+ PAGE_SIZE);
+ if (ret) {
+ dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mbox_free_channel(mtk_crtc->cmdq_chan);
+ mtk_crtc->cmdq_chan = NULL;
+ }
}
}
#endif
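
Dropping the cmdq_client wrapper means the driver now owns the packet buffer's DMA lifecycle end to end: kzalloc() and dma_map_single() at creation, dma_sync_single_for_device() after the CPU finishes writing commands and before mbox_send_message(), and dma_unmap_single() plus kfree() in the transmit-done callback. The ordering skeleton, as an illustrative sketch rather than driver code:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static dma_addr_t cmd_buf_map(struct device *dev, void *va, size_t len)
{
	/* One streaming mapping for the buffer's whole lifetime. */
	return dma_map_single(dev, va, len, DMA_TO_DEVICE);
}

static void cmd_buf_kick(struct device *dev, dma_addr_t pa, size_t len)
{
	/* CPU writes are done; flush them toward the device ... */
	dma_sync_single_for_device(dev, pa, len, DMA_TO_DEVICE);
	/* ... and only then ring the doorbell / send the mailbox msg. */
}

static void cmd_buf_done(struct device *dev, void *va, dma_addr_t pa,
			 size_t len)
{
	dma_unmap_single(dev, pa, len, DMA_TO_DEVICE);
	kfree(va);
}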
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 75bc00e17fc4..99cbf44463e4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -32,9 +32,6 @@
#define DISP_REG_UFO_START 0x0000
-#define DISP_AAL_EN 0x0000
-#define DISP_AAL_SIZE 0x0030
-
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
#define DISP_DITHER_CFG 0x0020
@@ -48,8 +45,6 @@
#define UFO_BYPASS BIT(2)
-#define AAL_EN BIT(0)
-
#define DISP_DITHERING BIT(2)
#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28)
#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24)
@@ -190,36 +185,6 @@ static void mtk_ufoe_start(struct device *dev)
writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
}
-static void mtk_aal_config(struct device *dev, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
-{
- struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-
- mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
-}
-
-static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
-{
- struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-
- mtk_gamma_set_common(priv->regs, state);
-}
-
-static void mtk_aal_start(struct device *dev)
-{
- struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-
- writel(AAL_EN, priv->regs + DISP_AAL_EN);
-}
-
-static void mtk_aal_stop(struct device *dev)
-{
- struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-
- writel_relaxed(0x0, priv->regs + DISP_AAL_EN);
-}
-
static void mtk_dither_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
@@ -247,8 +212,8 @@ static void mtk_dither_stop(struct device *dev)
}
static const struct mtk_ddp_comp_funcs ddp_aal = {
- .clk_enable = mtk_ddp_clk_enable,
- .clk_disable = mtk_ddp_clk_disable,
+ .clk_enable = mtk_aal_clk_enable,
+ .clk_disable = mtk_aal_clk_disable,
.gamma_set = mtk_aal_gamma_set,
.config = mtk_aal_config,
.start = mtk_aal_start,
@@ -505,7 +470,8 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
return ret;
}
- if (type == MTK_DISP_BLS ||
+ if (type == MTK_DISP_AAL ||
+ type == MTK_DISP_BLS ||
type == MTK_DISP_CCORR ||
type == MTK_DISP_COLOR ||
type == MTK_DISP_GAMMA ||
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 9b60bec33d3b..aec39724ebeb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -110,6 +110,17 @@ static const enum mtk_ddp_comp_id mt2712_mtk_ddp_third[] = {
DDP_COMPONENT_PWM2,
};
+static const enum mtk_ddp_comp_id mt8167_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_DSI0,
+};
+
static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
@@ -172,6 +183,11 @@ static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
.third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
};
+static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = {
+ .main_path = mt8167_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8167_mtk_ddp_main),
+};
+
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.main_path = mt8173_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
@@ -294,16 +310,7 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
component_unbind_all(drm->dev, drm);
}
-static const struct file_operations mtk_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = mtk_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
-};
+DEFINE_DRM_GEM_FOPS(mtk_drm_fops);
/*
* We need to override this because the device used to import the memory is
@@ -326,7 +333,7 @@ static const struct drm_driver mtk_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = mtk_drm_gem_prime_import,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
- .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
@@ -392,6 +399,8 @@ static const struct component_master_ops mtk_drm_ops = {
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
{ .compatible = "mediatek,mt2701-disp-ovl",
.data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8167-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8173-disp-ovl",
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8183-disp-ovl",
@@ -400,30 +409,46 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_OVL_2L },
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8167-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8183-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-wdma",
.data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt8167-disp-ccorr",
+ .data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt8183-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt2701-disp-color",
.data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8167-disp-color",
+ .data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color",
.data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8167-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8173-disp-aal",
.data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8183-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8167-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8167-disp-dither",
+ .data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8183-disp-dither",
.data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8173-disp-ufoe",
.data = (void *)MTK_DISP_UFOE },
{ .compatible = "mediatek,mt2701-dsi",
.data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8167-dsi",
+ .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8183-dsi",
@@ -438,12 +463,16 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2712-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8167-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8183-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2701-disp-pwm",
.data = (void *)MTK_DISP_BLS },
+ { .compatible = "mediatek,mt8167-disp-pwm",
+ .data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-pwm",
.data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-od",
@@ -458,6 +487,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt7623_mmsys_driver_data},
{ .compatible = "mediatek,mt2712-mmsys",
.data = &mt2712_mmsys_driver_data},
+ { .compatible = "mediatek,mt8167-mmsys",
+ .data = &mt8167_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data},
{ .compatible = "mediatek,mt8183-mmsys",
@@ -526,11 +557,12 @@ static int mtk_drm_probe(struct platform_device *pdev)
private->comp_node[comp_id] = of_node_get(node);
/*
- * Currently only the CCORR, COLOR, GAMMA, OVL, RDMA, DSI, and DPI
+ * Currently only the AAL, CCORR, COLOR, GAMMA, OVL, RDMA, DSI, and DPI
* blocks have separate component platform drivers and initialize their own
* DDP component structure. The others are initialized here.
*/
- if (comp_type == MTK_DISP_CCORR ||
+ if (comp_type == MTK_DISP_AAL ||
+ comp_type == MTK_DISP_CCORR ||
comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_GAMMA ||
comp_type == MTK_DISP_OVL ||
@@ -630,6 +662,7 @@ static struct platform_driver mtk_drm_platform_driver = {
};
static struct platform_driver * const mtk_drm_drivers[] = {
+ &mtk_disp_aal_driver,
&mtk_disp_ccorr_driver,
&mtk_disp_color_driver,
&mtk_disp_gamma_driver,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 637f5669e895..3e7d1e6fbe01 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -46,6 +46,7 @@ struct mtk_drm_private {
struct drm_atomic_state *suspend_state;
};
+extern struct platform_driver mtk_disp_aal_driver;
extern struct platform_driver mtk_disp_ccorr_driver;
extern struct platform_driver mtk_disp_color_driver;
extern struct platform_driver mtk_disp_gamma_driver;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 280ea0d5e840..d0544962cfc1 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -14,11 +14,14 @@
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
+static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
.free = mtk_drm_gem_free_object,
.get_sg_table = mtk_gem_prime_get_sg_table,
.vmap = mtk_drm_gem_prime_vmap,
.vunmap = mtk_drm_gem_prime_vunmap,
+ .mmap = mtk_drm_gem_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
@@ -146,10 +149,18 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
struct mtk_drm_private *priv = obj->dev->dev_private;
/*
+ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+ * whole buffer from the start.
+ */
+ vma->vm_pgoff = 0;
+
+ /*
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
@@ -159,37 +170,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
return ret;
}
-int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret)
- return ret;
-
- return mtk_drm_gem_object_mmap(obj, vma);
-}
-
-int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- obj = vma->vm_private_data;
-
- /*
- * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
- * whole buffer from the start.
- */
- vma->vm_pgoff = 0;
-
- return mtk_drm_gem_object_mmap(obj, vma);
-}
-
/*
* Allocate a sg_table for this GEM object.
* Note: Both the table's contents, and the sg_table itself must be freed by
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 6da5ccb4b933..9a359a06cb73 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -39,9 +39,6 @@ struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size,
bool alloc_kmap);
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index dc7f3e40850b..e9c6af78b1d7 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -116,9 +116,9 @@ config DRM_MSM_DSI_10NM_PHY
Choose this option if DSI PHY on SDM845 is used on the platform.
config DRM_MSM_DSI_7NM_PHY
- bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)"
+ bool "Enable DSI 7nm PHY driver in MSM DRM"
depends on DRM_MSM_DSI
default y
help
- Choose this option if DSI PHY on SM8150/SM8250 is used on the
- platform.
+ Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
+ the platform.
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 0a93ed1d6b06..5e2750eb3810 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -18,6 +18,18 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (a5xx_gpu->has_whereami) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+ }
+}
+
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
bool sync)
{
@@ -30,11 +42,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
* Most flush operations need to issue a WHERE_AM_I opcode to sync up
* the rptr shadow
*/
- if (a5xx_gpu->has_whereami && sync) {
- OUT_PKT7(ring, CP_WHERE_AM_I, 2);
- OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
- OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
- }
+ if (sync)
+ update_shadow_rptr(gpu, ring);
spin_lock_irqsave(&ring->preempt_lock, flags);
@@ -168,6 +177,16 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
ibs++;
break;
}
+
+	/*
+	 * Periodically update the shadow rptr if needed, so that we
+	 * can see partial progress of submits with a large number of
+	 * cmds; otherwise we could needlessly stall waiting for
+	 * ringbuffer state, simply due to looking at a shadow rptr
+	 * value that has not been updated.
+	 */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
}
/*
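The shadow rptr here is a GPU-maintained copy of the CP read pointer that the CPU consults when checking how much ringbuffer space is free. Emitting CP_WHERE_AM_I only at flush time means a single huge submit reports no progress until it fully drains; refreshing it every 32 IBs bounds how stale the CPU's view can get. Roughly, the space check this keeps honest looks like the sketch below (illustrative only, not the driver's literal helper):

    /*
     * Free dwords in a power-of-two ring, given the CPU-side write
     * pointer and the GPU-updated shadow read pointer.  One slot is
     * kept empty so a full ring is distinguishable from an empty one.
     */
    static u32 ring_free_dwords(u32 wptr, u32 shadow_rptr, u32 size_dwords)
    {
        return (shadow_rptr + (size_dwords - 1) - wptr) % size_dwords;
    }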
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d7cec7f0dde0..a7c58018959f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -519,9 +519,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (!pdcptr)
goto err;
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
pdc_in_aop = true;
- else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+ else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090;
else
pdc_address_offset = 0x30080;
@@ -933,6 +933,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
+ clk_set_rate(gmu->hub_clk, 150000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
pm_runtime_put(gmu->gxpd);
@@ -1393,6 +1394,9 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
gmu->nr_clocks, "gmu");
+ gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "hub");
+
return 0;
}
@@ -1504,7 +1508,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
* are otherwise unused by a660.
*/
gmu->dummy.size = SZ_4K;
- if (adreno_is_a660(adreno_gpu)) {
+ if (adreno_is_a660_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
if (ret)
goto err_memory;
@@ -1522,7 +1526,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
SZ_16M - SZ_16K, 0x04000);
if (ret)
goto err_memory;
- } else if (adreno_is_a640(adreno_gpu)) {
+ } else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_256K - SZ_16K, 0x04000);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 71dfa60070cc..3c74f64e3126 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -66,6 +66,7 @@ struct a6xx_gmu {
int nr_clocks;
struct clk_bulk_data *clocks;
struct clk *core_clk;
+ struct clk *hub_clk;
/* current performance index set externally */
int current_perf_index;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 989301230f14..40c9fef457a4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -52,21 +52,25 @@ static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return true;
}
-static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- uint32_t wptr;
- unsigned long flags;
/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
}
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ update_shadow_rptr(gpu, ring);
spin_lock_irqsave(&ring->preempt_lock, flags);
@@ -145,7 +149,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
- unsigned int i;
+ unsigned int i, ibs = 0;
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
@@ -181,8 +185,19 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
break;
}
+
+	/*
+	 * Periodically update the shadow rptr if needed, so that we
+	 * can see partial progress of submits with a large number of
+	 * cmds; otherwise we could needlessly stall waiting for
+	 * ringbuffer state, simply due to looking at a shadow rptr
+	 * value that has not been updated.
+	 */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
}
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -652,7 +667,7 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
regs = a650_protect;
count = ARRAY_SIZE(a650_protect);
count_max = 48;
- } else if (adreno_is_a660(adreno_gpu)) {
+ } else if (adreno_is_a660_family(adreno_gpu)) {
regs = a660_protect;
count = ARRAY_SIZE(a660_protect);
count_max = 48;
@@ -683,7 +698,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a618(adreno_gpu))
return;
- if (adreno_is_a640(adreno_gpu))
+ if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
@@ -694,6 +709,13 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
uavflagprd_inv = 2;
}
+ if (adreno_is_7c3(adreno_gpu)) {
+ lower_bit = 1;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
@@ -740,6 +762,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
+ const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
u32 *buf = msm_gem_get_vaddr(obj);
bool ret = false;
@@ -756,8 +779,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
*
* a660 targets have all the critical security fixes from the start
*/
- if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
- adreno_is_a640(adreno_gpu)) {
+ if (!strcmp(sqe_name, "a630_sqe.fw")) {
/*
* If the lowest nibble is 0xa that is an indication that this
* microcode has been patched. The actual version is in dword
@@ -778,7 +800,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
DRM_DEV_ERROR(&gpu->pdev->dev,
"a630 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x190);
- } else if (adreno_is_a650(adreno_gpu)) {
+ } else if (!strcmp(sqe_name, "a650_sqe.fw")) {
if ((buf[0] & 0xfff) >= 0x095) {
ret = true;
goto out;
@@ -787,7 +809,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
DRM_DEV_ERROR(&gpu->pdev->dev,
"a650 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x095);
- } else if (adreno_is_a660(adreno_gpu)) {
+ } else if (!strcmp(sqe_name, "a660_sqe.fw")) {
ret = true;
} else {
DRM_DEV_ERROR(&gpu->pdev->dev,
@@ -897,7 +919,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start*/
- if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
+ if (adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
@@ -935,13 +958,14 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
- if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
+ if (adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
else
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
- if (adreno_is_a660(adreno_gpu))
+ if (adreno_is_a660_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
/* Setting the mem pool size */
@@ -952,8 +976,10 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
*/
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a640(adreno_gpu))
+ else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
else
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
@@ -990,13 +1016,15 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Protect registers from the CP */
a6xx_set_cp_protect(gpu);
- if (adreno_is_a660(adreno_gpu)) {
+ if (adreno_is_a660_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
- /* Set dualQ + disable afull for A660 GPU but not for A635 */
- gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
}
+ /* Set dualQ + disable afull for A660 GPU */
+ if (adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
@@ -1383,13 +1411,13 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- u32 cntl1_regval = 0;
+ u32 gpu_scid, cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
- u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+ gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
@@ -1409,26 +1437,34 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
}
}
- if (cntl1_regval) {
+ if (!cntl1_regval)
+ return;
+
+ /*
+ * Program the slice IDs for the various GPU blocks and GPU MMU
+ * pagetables
+ */
+ if (!a6xx_gpu->have_mmu500) {
+ a6xx_llc_write(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
+
/*
- * Program the slice IDs for the various GPU blocks and GPU MMU
- * pagetables
+ * Program cacheability overrides to not allocate cache
+ * lines on a write miss
*/
- if (a6xx_gpu->have_mmu500)
- gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
- cntl1_regval);
- else {
- a6xx_llc_write(a6xx_gpu,
- REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
-
- /*
- * Program cacheability overrides to not allocate cache
- * lines on a write miss
- */
- a6xx_llc_rmw(a6xx_gpu,
- REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
- }
+ a6xx_llc_rmw(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
+ return;
}
+
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
+
+	/*
+	 * On A660, the SCID programming for UCHE traffic is done in
+	 * A6XX_GBIF_SCACHE_CNTL0[14:10].
+	 */
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+ (1 << 8), (gpu_scid << 10) | (1 << 8));
}
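For reference, cntl1_regval above replicates the 5-bit cache slice ID into five consecutive register fields (bits 0, 5, 10, 15 and 20), one per GPU sub-block; the a660-family quirk is only that the UCHE copy lives in GBIF_SCACHE_CNTL0[14:10] instead. A sketch of the packing (illustrative helper, not a driver function):

    /* Replicate a 5-bit SCID into the five SCACHE_CNTL1 fields. */
    static u32 pack_gpu_scid(u32 scid)
    {
        scid &= 0x1f;

        return (scid << 0) | (scid << 5) | (scid << 10) |
               (scid << 15) | (scid << 20);
    }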
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1667,11 +1703,11 @@ static u32 a618_get_speed_bin(u32 fuse)
return UINT_MAX;
}
-static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
{
u32 val = UINT_MAX;
- if (revn == 618)
+ if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
val = a618_get_speed_bin(fuse);
if (val == UINT_MAX) {
@@ -1684,14 +1720,13 @@ static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
return (1 << val);
}
-static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
- u32 revn)
+static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
{
u32 supp_hw = UINT_MAX;
- u16 speedbin;
+ u32 speedbin;
int ret;
- ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
+ ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin);
/*
* -ENOENT means that the platform doesn't support speedbin which is
* fine
@@ -1704,9 +1739,8 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
ret);
goto done;
}
- speedbin = le16_to_cpu(speedbin);
- supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
+ supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
done:
ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
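The bin index returned by fuse_to_supp_hw() becomes a one-hot mask handed to devm_pm_opp_set_supported_hw(), so only OPP table entries whose opp-supported-hw bits overlap 1 << bin stay enabled. A hypothetical fuse-to-bin table, for illustration only (the fuse values are made up; a618_get_speed_bin() above has the same shape):

    static u32 example_get_speed_bin(u32 fuse)
    {
        switch (fuse) {
        case 0:
            return 0;        /* unfused parts: default bin */
        case 169:
            return 1;
        case 174:
            return 2;
        default:
            return UINT_MAX; /* unknown fuse: handled by the caller */
        }
    }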
@@ -1772,12 +1806,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
*/
info = adreno_info(config->rev);
- if (info && (info->revn == 650 || info->revn == 660))
+ if (info && (info->revn == 650 || info->revn == 660 ||
+ adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
adreno_gpu->base.hw_apriv = true;
a6xx_llc_slices_init(pdev, a6xx_gpu);
- ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+ ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 919433732b43..d4c65bf0a1b7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -382,6 +382,36 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x50000;
+ msg->ddr_cmds_addrs[2] = 0x50088;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5006c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -428,10 +458,12 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(&msg);
- else if (adreno_is_a640(adreno_gpu))
+ else if (adreno_is_a640_family(adreno_gpu))
a640_build_bw_table(&msg);
else if (adreno_is_a650(adreno_gpu))
a650_build_bw_table(&msg);
+ else if (adreno_is_7c3(adreno_gpu))
+ adreno_7c3_build_bw_table(&msg);
else if (adreno_is_a660(adreno_gpu))
a660_build_bw_table(&msg);
else
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 6dad8015c9a1..2a6ce76656aa 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -8,8 +8,6 @@
#include "adreno_gpu.h"
-#define ANY_ID 0xff
-
bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
@@ -300,6 +298,30 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mdt",
.hwcg = a660_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 3, 5, ANY_ID),
+ .name = "Adreno 7c Gen 3",
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .hwcg = a660_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 8, 0, ANY_ID),
+ .revn = 680,
+ .name = "A680",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_2M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ .hwcg = a640_hwcg,
},
};
@@ -325,6 +347,15 @@ static inline bool _rev_match(uint8_t entry, uint8_t id)
return (entry == ANY_ID) || (entry == id);
}
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
+{
+ return _rev_match(rev1.core, rev2.core) &&
+ _rev_match(rev1.major, rev2.major) &&
+ _rev_match(rev1.minor, rev2.minor) &&
+ _rev_match(rev1.patchid, rev2.patchid);
+}
+
const struct adreno_info *adreno_info(struct adreno_rev rev)
{
int i;
@@ -332,10 +363,7 @@ const struct adreno_info *adreno_info(struct adreno_rev rev)
/* identify gpu: */
for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
const struct adreno_info *info = &gpulist[i];
- if (_rev_match(info->rev.core, rev.core) &&
- _rev_match(info->rev.major, rev.major) &&
- _rev_match(info->rev.minor, rev.minor) &&
- _rev_match(info->rev.patchid, rev.patchid))
+ if (adreno_cmp_rev(info->rev, rev))
return info;
}
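Note that _rev_match() only treats ANY_ID as a wildcard in its first operand, so adreno_cmp_rev() is directional: the pattern containing wildcards goes first, the concrete probed revision second (this is the ordering adreno_is_7c3() relies on). Illustrative usage:

    /* Matches every 6.3.5 part regardless of patch level. */
    static bool is_any_635(struct adreno_gpu *gpu)
    {
        return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
    }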
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8dbe0d157520..225c277a6223 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -42,6 +42,8 @@ struct adreno_rev {
uint8_t patchid;
};
+#define ANY_ID 0xff
+
#define ADRENO_REV(core, major, minor, patchid) \
((struct adreno_rev){ core, major, minor, patchid })
@@ -141,6 +143,8 @@ struct adreno_platform_config {
__ret; \
})
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
+
static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{
return (gpu->revn < 300);
@@ -237,9 +241,9 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
return gpu->revn == 630;
}
-static inline int adreno_is_a640(struct adreno_gpu *gpu)
+static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
{
- return gpu->revn == 640;
+ return (gpu->revn == 640) || (gpu->revn == 680);
}
static inline int adreno_is_a650(struct adreno_gpu *gpu)
@@ -247,15 +251,27 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu)
return gpu->revn == 650;
}
+static inline int adreno_is_7c3(struct adreno_gpu *gpu)
+{
+ /* The order of args is important here to handle ANY_ID correctly */
+ return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
+}
+
static inline int adreno_is_a660(struct adreno_gpu *gpu)
{
return gpu->revn == 660;
}
+static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
+{
+ return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
+}
+
/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
{
- return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660;
+ return gpu->revn == 650 || gpu->revn == 620 ||
+ adreno_is_a660_family(gpu);
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 9a5c70c87cc8..768012243b44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -30,12 +30,6 @@
#include "dpu_core_perf.h"
#include "dpu_trace.h"
-#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
-#define DPU_DRM_BLEND_OP_OPAQUE 1
-#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
-#define DPU_DRM_BLEND_OP_COVERAGE 3
-#define DPU_DRM_BLEND_OP_MAX 4
-
/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
@@ -146,20 +140,43 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
uint32_t blend_op;
+ uint32_t fg_alpha, bg_alpha;
- /* default to opaque blending */
- blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
- DPU_BLEND_BG_ALPHA_BG_CONST;
+ fg_alpha = pstate->base.alpha >> 8;
+ bg_alpha = 0xff - fg_alpha;
- if (format->alpha_enable) {
+ /* default to opaque blending */
+ if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
+ !format->alpha_enable) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST;
+ } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
+ } else {
/* coverage blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
- DPU_BLEND_BG_ALPHA_FG_PIXEL |
- DPU_BLEND_BG_INV_ALPHA;
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_FG_MOD_ALPHA |
+ DPU_BLEND_FG_INV_MOD_ALPHA |
+ DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
}
lm->ops.setup_blend_config(lm, pstate->stage,
- 0xFF, 0, blend_op);
+ fg_alpha, bg_alpha, blend_op);
DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
&format->base.pixel_format, format->alpha_enable, blend_op);
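In Porter-Duff terms both alpha-blended cases above reduce to the usual "over" operator; the difference is only whether the foreground channels already carry the pixel alpha. Ignoring the constant-alpha modulation applied when the plane alpha property is below 0xff, the per-channel math being selected is (illustrative, values normalized to [0, 1]):

    static float blend_premultiplied(float fg, float bg, float fg_alpha)
    {
        /* fg channels were already multiplied by fg_alpha */
        return fg + bg * (1.0f - fg_alpha);
    }

    static float blend_coverage(float fg, float bg, float fg_alpha)
    {
        return fg * fg_alpha + bg * (1.0f - fg_alpha);
    }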
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 1c04b7cce43e..0e9d3fa1544b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -274,20 +274,20 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
- DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d",
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx);
return -EWOULDBLOCK;
}
if (irq->irq_idx < 0) {
- DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s",
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
DRMID(phys_enc->parent), intr_idx,
irq->name);
return 0;
}
- DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
@@ -303,8 +303,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
if (irq_status) {
unsigned long flags;
- DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
- "irq=%d, pp=%d, atomic_cnt=%d",
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0,
@@ -315,8 +314,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
ret = 0;
} else {
ret = -ETIMEDOUT;
- DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
- "irq=%d, pp=%d, atomic_cnt=%d",
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index d01c4c919504..2e482cdd7b3c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -974,6 +974,7 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
.amortizable_threshold = 25,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sdm845_qos_linear),
.entries = sdm845_qos_linear
@@ -1001,6 +1002,7 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
.min_dram_ib = 1600000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear
@@ -1028,6 +1030,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
.min_dram_ib = 800000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sm8150_qos_linear),
.entries = sm8150_qos_linear
@@ -1056,6 +1059,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
.min_dram_ib = 800000,
.min_prefill_lines = 35,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear
@@ -1084,6 +1088,7 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
.min_dram_ib = 1600000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xffff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
.entries = sc7180_qos_macrotile
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index f8a74f6cdc4c..64740ddb983e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -345,10 +345,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
int i;
for (i = 0; i < ctx->mixer_count; i++) {
- DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
+
+ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
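This fixes clearing on catalogs where the instantiated mixers are not a contiguous LM_0..LM_n run: indexing by LM_0 + i could write registers for mixers that do not exist while leaving real ones configured. A sketch of the corrected iteration, assuming a hypothetical catalog that exposes only LM_0 and LM_2:

    static void clear_catalog_mixers(struct dpu_hw_ctl *ctx)
    {
        struct dpu_hw_blk_reg_map *c = &ctx->hw;
        int i;

        for (i = 0; i < ctx->mixer_count; i++) {
            /* e.g. LM_0 then LM_2: only mixers the catalog lists */
            enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

            DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
        }
    }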
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 4fd913522931..ae48f41821cf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -471,30 +471,68 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
int i, rc = 0;
if (!(priv->dsi[0] || priv->dsi[1]))
return rc;
- /*TODO: Support two independent DSI connectors */
- encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
- if (IS_ERR(encoder)) {
- DPU_ERROR("encoder init failed for dsi display\n");
- return PTR_ERR(encoder);
- }
-
- priv->encoders[priv->num_encoders++] = encoder;
-
+ /*
+	 * We support the following configurations:
+ * - Single DSI host (dsi0 or dsi1)
+ * - Two independent DSI hosts
+ * - Bonded DSI0 and DSI1 hosts
+ *
+ * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
+ */
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ int other = (i + 1) % 2;
+
if (!priv->dsi[i])
continue;
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
+ !msm_dsi_is_master_dsi(priv->dsi[i]))
+ continue;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ memset(&info, 0, sizeof(info));
+ info.intf_type = encoder->encoder_type;
+
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
break;
}
+
+ info.h_tile_instance[info.num_of_h_tiles++] = i;
+ info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ?
+ MSM_DISPLAY_CAP_CMD_MODE :
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
+ rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ other, rc);
+ break;
+ }
+
+ info.h_tile_instance[info.num_of_h_tiles++] = other;
+ }
+
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
}
return rc;
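The loop above encodes the host-selection rule: every non-bonded host gets its own encoder, while in a bonded pair only the master creates the encoder and the slave is folded in as a second horizontal tile. A sketch of that predicate, using the helpers referenced in the patch (the function itself is illustrative, not part of the driver):

    static bool dsi_host_creates_encoder(struct msm_dsi *dsi)
    {
        if (!dsi)
            return false;

        /* A bonded slave is driven through its master's encoder. */
        if (msm_dsi_is_bonded_dsi(dsi) && !msm_dsi_is_master_dsi(dsi))
            return false;

        return true;
    }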
@@ -505,6 +543,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
int rc = 0;
if (!priv->dp)
@@ -516,6 +555,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return PTR_ERR(encoder);
}
+ memset(&info, 0, sizeof(info));
rc = msm_dp_modeset_init(priv->dp, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
@@ -524,6 +564,14 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
}
priv->encoders[priv->num_encoders++] = encoder;
+
+ info.num_of_h_tiles = 1;
+ info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
+ info.intf_type = encoder->encoder_type;
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
return rc;
}
@@ -726,41 +774,6 @@ static void dpu_kms_destroy(struct msm_kms *kms)
msm_kms_destroy(&dpu_kms->base);
}
-static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
- struct drm_encoder *encoder,
- bool cmd_mode)
-{
- struct msm_display_info info;
- struct msm_drm_private *priv = encoder->dev->dev_private;
- int i, rc = 0;
-
- memset(&info, 0, sizeof(info));
-
- info.intf_type = encoder->encoder_type;
- info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
- MSM_DISPLAY_CAP_VID_MODE;
-
- switch (info.intf_type) {
- case DRM_MODE_ENCODER_DSI:
- /* TODO: No support for DSI swap */
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (priv->dsi[i]) {
- info.h_tile_instance[info.num_of_h_tiles] = i;
- info.num_of_h_tiles++;
- }
- }
- break;
- case DRM_MODE_ENCODER_TMDS:
- info.num_of_h_tiles = 1;
- break;
- }
-
- rc = dpu_encoder_setup(encoder->dev, encoder, &info);
- if (rc)
- DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
- encoder->base.id, rc);
-}
-
static irqreturn_t dpu_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -863,7 +876,6 @@ static const struct msm_kms_funcs kms_funcs = {
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
.destroy = dpu_kms_destroy,
- .set_encoder_mode = _dpu_kms_set_encoder_mode,
.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = dpu_kms_debugfs_init,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index ec4a6f04394a..c989621209aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1339,9 +1339,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
return;
}
- pstate->base.plane = plane;
-
- plane->state = &pstate->base;
+ __drm_atomic_helper_plane_reset(plane, &pstate->base);
}
#ifdef CONFIG_DEBUG_FS
@@ -1647,6 +1645,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4a5b518288b0..cdcaf470f148 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -19,30 +19,12 @@ static int mdp4_hw_init(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp4_kms->dev;
- uint32_t version, major, minor, dmap_cfg, vg_cfg;
+ u32 dmap_cfg, vg_cfg;
unsigned long clk;
int ret = 0;
pm_runtime_get_sync(dev->dev);
- mdp4_enable(mdp4_kms);
- version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
- mdp4_disable(mdp4_kms);
-
- major = FIELD(version, MDP4_VERSION_MAJOR);
- minor = FIELD(version, MDP4_VERSION_MINOR);
-
- DBG("found MDP4 version v%d.%d", major, minor);
-
- if (major != 4) {
- DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
- major, minor);
- ret = -ENXIO;
- goto out;
- }
-
- mdp4_kms->rev = minor;
-
if (mdp4_kms->rev > 1) {
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
@@ -88,7 +70,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
-out:
pm_runtime_put_sync(dev->dev);
return ret;
@@ -108,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
- int i;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
-
- /* see 119ecb7fd */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
- drm_crtc_vblank_get(crtc);
}
static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
@@ -133,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- struct drm_crtc *crtc;
-
- /* see 119ecb7fd */
- for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
- drm_crtc_vblank_put(crtc);
}
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -411,14 +379,32 @@ fail:
return ret;
}
+static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
+ u32 *major, u32 *minor)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ u32 version;
+
+ mdp4_enable(mdp4_kms);
+ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ mdp4_disable(mdp4_kms);
+
+ *major = FIELD(version, MDP4_VERSION_MAJOR);
+ *minor = FIELD(version, MDP4_VERSION_MINOR);
+
+	DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d\n", *major, *minor);
+}
+
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp4_platform_config *config = mdp4_get_config(pdev);
+ struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
struct msm_gem_address_space *aspace;
int irq, ret;
+ u32 major, minor;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
@@ -433,7 +419,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- kms = &mdp4_kms->base.base;
+ priv->kms = &mdp4_kms->base.base;
+ kms = priv->kms;
mdp4_kms->dev = dev;
@@ -479,15 +466,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
- if (mdp4_kms->rev >= 2) {
- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
- if (IS_ERR(mdp4_kms->lut_clk)) {
- DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
- ret = PTR_ERR(mdp4_kms->lut_clk);
- goto fail;
- }
- }
-
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
@@ -496,8 +474,27 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
}
clk_set_rate(mdp4_kms->clk, config->max_clk);
- if (mdp4_kms->lut_clk)
+
+ read_mdp_hw_revision(mdp4_kms, &major, &minor);
+
+ if (major != 4) {
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mdp4_kms->rev = minor;
+
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ }
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
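Moving the revision read into mdp4_kms_init() fixes an ordering problem visible in the hunks above: the old code gated the lut_clk lookup on mdp4_kms->rev >= 2, but rev was only assigned later in mdp4_hw_init(), so at probe time it still held the zero from kzalloc() and the clock was never acquired. The version register also needs the block clocked, which is why the read sits after the core clk_set_rate(). Condensed, the corrected probe order is (a restatement of the code above, not new driver code):

    clk_set_rate(mdp4_kms->clk, config->max_clk);

    read_mdp_hw_revision(mdp4_kms, &major, &minor);  /* needs clocks on */
    if (major != 4) {
        ret = -ENXIO;
        goto fail;
    }
    mdp4_kms->rev = minor;

    if (mdp4_kms->rev >= 2) {                        /* lut_clk is rev >= 2 only */
        mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
        if (IS_ERR(mdp4_kms->lut_clk)) {
            ret = PTR_ERR(mdp4_kms->lut_clk);
            goto fail;
        }
        clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
    }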
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 81b0c7cf954e..1220f2b20e05 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -737,7 +737,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
}
/*
- * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI
+ * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI
* interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
* only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
* Single FLUSH is supported from hw rev v3.0.
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 15aed45022bc..b3b42672b2d4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -209,13 +209,6 @@ static int mdp5_set_split_display(struct msm_kms *kms,
slave_encoder);
}
-static void mdp5_set_encoder_mode(struct msm_kms *kms,
- struct drm_encoder *encoder,
- bool cmd_mode)
-{
- mdp5_encoder_set_intf_mode(encoder, cmd_mode);
-}
-
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -287,7 +280,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
- .set_encoder_mode = mdp5_set_encoder_mode,
.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = mdp5_kms_debugfs_init,
@@ -448,6 +440,9 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
}
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ if (!ret)
+ mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+
break;
}
default:
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index c92a9508c8d3..c22b07f68670 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -16,7 +16,6 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 4a3293b590b0..eb40d8413bca 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -353,6 +353,9 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
if (!(aux->retry_cnt % MAX_AUX_RETRIES))
dp_catalog_aux_update_cfg(aux->catalog);
}
+ /* reset aux if link is in connected state */
+ if (dp_catalog_link_is_connected(aux->catalog))
+ dp_catalog_aux_reset(aux->catalog);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index ca96e3514790..21b6ac857710 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -372,6 +372,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
+ DRM_DEBUG_DP("enable=%d\n", enable);
if (enable) {
/*
* To make sure link reg writes happens before other operation,
@@ -580,6 +581,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
config = (en ? config | intr_mask : config & ~intr_mask);
+ DRM_DEBUG_DP("intr_mask=%#x config=%#x\n", intr_mask, config);
dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
config & DP_DP_HPD_INT_MASK);
}
@@ -610,6 +612,7 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
u32 status;
status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ DRM_DEBUG_DP("aux status: %#x\n", status);
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
@@ -685,6 +688,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
/* Make sure to clear the current pattern before starting a new one */
dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+ DRM_DEBUG_DP("pattern: %#x\n", pattern);
switch (pattern) {
case DP_PHY_TEST_PATTERN_D10_2:
dp_write_link(catalog, REG_DP_STATE_CTRL,
@@ -745,7 +749,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
break;
default:
- DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern);
+ DRM_DEBUG_DP("No valid test pattern requested: %#x\n", pattern);
break;
}
}
@@ -928,7 +932,7 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
select = dp_catalog->audio_data;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
- DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
+ DRM_DEBUG_DP("select: %#x, acr_ctrl: %#x\n", select, acr_ctrl);
dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ee221d835fa0..62e75dc8afc6 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -81,13 +81,6 @@ struct dp_ctrl_private {
struct completion video_comp;
};
-struct dp_cr_status {
- u8 lane_0_1;
- u8 lane_2_3;
-};
-
-#define DP_LANE0_1_CR_DONE 0x11
-
static int dp_aux_link_configure(struct drm_dp_aux *aux,
struct dp_link_info *link)
{
@@ -120,7 +113,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
pr_warn("PUSH_IDLE pattern timedout\n");
- pr_debug("mainlink off done\n");
+ DRM_DEBUG_DP("mainlink off done\n");
}
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
@@ -1011,6 +1004,8 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
u32 voltage_swing_level = link->phy_params.v_level;
u32 pre_emphasis_level = link->phy_params.p_level;
+ DRM_DEBUG_DP("voltage level: %d emphasis level: %d\n", voltage_swing_level,
+ pre_emphasis_level);
ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
voltage_swing_level, pre_emphasis_level);
@@ -1078,7 +1073,7 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
}
static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
@@ -1107,9 +1102,6 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
- cr->lane_0_1 = link_status[0];
- cr->lane_2_3 = link_status[1];
-
if (drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes)) {
return 0;
@@ -1186,7 +1178,7 @@ static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
}
static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int tries = 0, ret = 0;
char pattern;
@@ -1202,10 +1194,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
else
pattern = DP_TRAINING_PATTERN_2;
- ret = dp_ctrl_update_vx_px(ctrl);
- if (ret)
- return ret;
-
ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
if (ret)
return ret;
@@ -1218,8 +1206,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
- cr->lane_0_1 = link_status[0];
- cr->lane_2_3 = link_status[1];
if (drm_dp_channel_eq_ok(link_status,
ctrl->link->link_params.num_lanes)) {
@@ -1239,7 +1225,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int ret = 0;
u8 encoding = DP_SET_ANSI_8B10B;
@@ -1255,7 +1241,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
&encoding, 1);
- ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
+ ret = dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
goto end;
@@ -1264,7 +1250,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
/* print success info as this is a result of user initiated action */
DRM_DEBUG_DP("link training #1 successful\n");
- ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
+ ret = dp_ctrl_link_train_2(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #2 failed. ret=%d\n", ret);
goto end;
@@ -1280,7 +1266,7 @@ end:
}
static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int ret = 0;
@@ -1295,7 +1281,7 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
* a link training pattern, we have to first do soft reset.
*/
- ret = dp_ctrl_link_train(ctrl, cr, training_step);
+ ret = dp_ctrl_link_train(ctrl, training_step);
return ret;
}
@@ -1382,6 +1368,7 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
if (reset)
dp_catalog_ctrl_reset(ctrl->catalog);
+ DRM_DEBUG_DP("flip=%d\n", flip);
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
@@ -1492,14 +1479,16 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
{
int ret = 0;
- struct dp_cr_status cr;
int training_step = DP_TRAINING_NONE;
dp_ctrl_push_idle(&ctrl->dp_ctrl);
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
- ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+ ret = dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
@@ -1526,7 +1515,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
@@ -1630,6 +1619,35 @@ void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
}
}
+static bool dp_ctrl_clock_recovery_any_ok(
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int reduced_cnt;
+
+ if (lane_count <= 1)
+ return false;
+
+ /*
+	 * We are only interested in the lanes that remain after the
+	 * lane count is reduced:
+	 * lane_count = 4: check only the first 2 lanes
+	 * lane_count = 2: check only the first lane
+ */
+ reduced_cnt = lane_count >> 1;
+
+ return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
+}
+
+static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int num_lanes = ctrl->link->link_params.num_lanes;
+
+ dp_ctrl_read_link_status(ctrl, link_status);
+
+ return drm_dp_channel_eq_ok(link_status, num_lanes);
+}
+
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
{
int rc = 0;
@@ -1637,7 +1655,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
u32 rate = 0;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
- struct dp_cr_status cr;
+ u8 link_status[DP_LINK_STATUS_SIZE];
unsigned int training_step;
if (!dp_ctrl)
@@ -1664,6 +1682,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate,
ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
return rc;
@@ -1677,19 +1698,21 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
training_step = DP_TRAINING_NONE;
- rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+ rc = dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
/* training completed successfully */
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+ if (!dp_catalog_link_is_connected(ctrl->catalog))
break;
- }
+
+ dp_ctrl_read_link_status(ctrl, link_status);
rc = dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
- if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
+ if (dp_ctrl_clock_recovery_any_ok(link_status,
+ ctrl->link->link_params.num_lanes)) {
/*
* some lanes are ready,
* reduce lane number
@@ -1705,12 +1728,18 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
}
} else if (training_step == DP_TRAINING_2) {
- /* link train_2 failed, lower lane rate */
- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+ /* link train_2 failed */
+ if (!dp_catalog_link_is_connected(ctrl->catalog))
break;
- }
- rc = dp_ctrl_link_lane_down_shift(ctrl);
+ dp_ctrl_read_link_status(ctrl, link_status);
+
+ if (!drm_dp_clock_recovery_ok(link_status,
+ ctrl->link->link_params.num_lanes))
+ rc = dp_ctrl_link_rate_down_shift(ctrl);
+ else
+ rc = dp_ctrl_link_lane_down_shift(ctrl);
+
if (rc < 0) {
/* end with failure */
break; /* lane == 1 already */
@@ -1721,17 +1750,19 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return rc;
- /* stop txing train pattern */
- dp_ctrl_clear_training_pattern(ctrl);
+	if (rc == 0) { /* link trained successfully */
+		/*
+		 * Do not stop the training pattern here; it is cleared
+		 * in dp_ctrl_on_stream() instead, in order to pass
+		 * compliance tests.
+		 */
+	} else {
+		/*
+		 * Link training failed, so stop transmitting the
+		 * training pattern here.
+		 */
+ dp_ctrl_clear_training_pattern(ctrl);
- /*
- * keep transmitting idle pattern until video ready
- * to avoid main link from loss of sync
- */
- if (rc == 0) /* link train successfully */
- dp_ctrl_push_idle(dp_ctrl);
- else {
- /* link training failed */
dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
}
@@ -1739,9 +1770,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
return rc;
}
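Put together, dp_ctrl_on_link() now walks a fallback ladder on each failed attempt, reading the DPCD link status fresh instead of keying off the removed dp_cr_status snapshot. An illustrative summary of the decision (a compressed restatement of the branches above, not the driver's literal control flow):

    enum dp_fallback { DP_LOWER_RATE, DP_LOWER_LANES, DP_GIVE_UP };

    static enum dp_fallback next_fallback(int failed_step, bool cr_any_ok,
                                          bool cr_all_ok, bool at_min_rate,
                                          bool at_one_lane)
    {
        if (failed_step == 1) {
            if (!at_min_rate)
                return DP_LOWER_RATE;
            /* already at RBR: drop lanes only if some lanes passed CR */
            return (cr_any_ok && !at_one_lane) ? DP_LOWER_LANES : DP_GIVE_UP;
        }

        /* step 2 failed: lower the rate if clock recovery was lost ... */
        if (!cr_all_ok)
            return at_min_rate ? DP_GIVE_UP : DP_LOWER_RATE;

        /* ... otherwise keep the rate and shed a lane */
        return at_one_lane ? DP_GIVE_UP : DP_LOWER_LANES;
    }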
+static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
+{
+ int training_step = DP_TRAINING_NONE;
+
+ return dp_ctrl_setup_main_link(ctrl, &training_step);
+}
+
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
{
- u32 rate = 0;
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
@@ -1751,10 +1788,6 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- rate = ctrl->panel->link_info.rate;
-
- ctrl->link->link_params.rate = rate;
- ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
@@ -1769,6 +1802,12 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
}
}
+ if (!dp_ctrl_channel_eq_ok(ctrl))
+ dp_ctrl_link_retrain(ctrl);
+
+ /* stop txing train pattern to end link training */
+ dp_ctrl_clear_training_pattern(ctrl);
+
ret = dp_ctrl_enable_stream_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 051c1be1de7e..419fd4a14cbf 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -55,7 +55,6 @@ enum {
EV_HPD_INIT_SETUP,
EV_HPD_PLUG_INT,
EV_IRQ_HPD_INT,
- EV_HPD_REPLUG_INT,
EV_HPD_UNPLUG_INT,
EV_USER_NOTIFICATION,
EV_CONNECT_PENDING_TIMEOUT,
@@ -102,8 +101,6 @@ struct dp_display_private {
struct dp_display_mode dp_mode;
struct msm_dp dp_display;
- bool encoder_mode_set;
-
/* wait for audio signaling */
struct completion audio_comp;
@@ -267,6 +264,8 @@ static bool dp_display_is_ds_bridge(struct dp_panel *panel)
static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
{
+ DRM_DEBUG_DP("present=%#x sink_count=%d\n", dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
+ dp->link->sink_count);
return dp_display_is_ds_bridge(dp->panel) &&
(dp->link->sink_count == 0);
}
@@ -283,20 +282,6 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display)
}
-static void dp_display_set_encoder_mode(struct dp_display_private *dp)
-{
- struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
- if (!dp->encoder_mode_set && dp->dp_display.encoder &&
- kms->funcs->set_encoder_mode) {
- kms->funcs->set_encoder_mode(kms,
- dp->dp_display.encoder, false);
-
- dp->encoder_mode_set = true;
- }
-}
-
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
@@ -312,6 +297,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
dp->dp_display.is_connected = hpd;
+ DRM_DEBUG_DP("hpd=%d\n", hpd);
dp_display_send_hpd_event(&dp->dp_display);
return 0;
@@ -361,6 +347,7 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
{
bool flip = false;
+ DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
if (dp->core_initialized) {
DRM_DEBUG_DP("DP core already initialized\n");
return;
@@ -369,8 +356,6 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
if (dp->usbpd->orientation == ORIENTATION_CC2)
flip = true;
- dp_display_set_encoder_mode(dp);
-
dp_power_init(dp->power, flip);
dp_ctrl_host_init(dp->ctrl, flip, reset);
dp_aux_init(dp->aux);
@@ -465,8 +450,10 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
{
u32 sink_request = dp->link->sink_request;
+ DRM_DEBUG_DP("%d\n", sink_request);
if (dp->hpd_state == ST_DISCONNECTED) {
if (sink_request & DP_LINK_STATUS_UPDATED) {
+ DRM_DEBUG_DP("Disconnected sink_request: %d\n", sink_request);
DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
return -EINVAL;
}
@@ -498,6 +485,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
rc = dp_link_process_request(dp->link);
if (!rc) {
sink_request = dp->link->sink_request;
+ DRM_DEBUG_DP("hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
rc = dp_display_handle_port_ststus_changed(dp);
else
@@ -520,6 +508,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
+ DRM_DEBUG_DP("hpd_state=%d\n", state);
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;
@@ -655,6 +644,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* start sentinel checking in case of missing uevent */
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+ DRM_DEBUG_DP("hpd_state=%d\n", state);
/* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(g_dp_display, false);
@@ -713,6 +703,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
}
+ DRM_DEBUG_DP("hpd_state=%d\n", state);
mutex_unlock(&dp->event_mutex);
@@ -854,6 +845,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
dp_display = g_dp_display;
+ DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
if (dp_display->power_on) {
DRM_DEBUG_DP("Link already setup, return\n");
return 0;
@@ -915,6 +907,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
dp_display->power_on = false;
+ DRM_DEBUG_DP("sink count: %d\n", dp->link->sink_count);
return 0;
}
@@ -1014,10 +1007,8 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
{
struct dp_display_private *dp_display;
- struct drm_device *drm;
dp_display = container_of(dp, struct dp_display_private, dp_display);
- drm = dp->drm_dev;
/*
* if we are reading registers we need the link clocks to be on
@@ -1118,9 +1109,6 @@ static int hpd_event_thread(void *data)
case EV_IRQ_HPD_INT:
dp_irq_hpd_handle(dp_priv, todo->data);
break;
- case EV_HPD_REPLUG_INT:
- /* do nothing */
- break;
case EV_USER_NOTIFICATION:
dp_display_send_hpd_notification(dp_priv,
todo->data);
@@ -1162,12 +1150,11 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+ DRM_DEBUG_DP("hpd isr status=%#x\n", hpd_isr_status);
if (hpd_isr_status & 0x0F) {
/* hpd related interrupts */
- if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
- hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
- }
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
/* stop sentinel connect pending checking */
@@ -1175,8 +1162,10 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
}
- if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
- dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
+ if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
+ }
if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
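/*
 * Editorial sketch (behavioural summary, not patch code): with
 * EV_HPD_REPLUG_INT removed, a replug interrupt is decomposed into an
 * immediate unplug plus a deferred plug, so it is serviced entirely by
 * the two existing handlers:
 *
 *   dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); -> dp_hpd_unplug_handle()
 *   dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);   -> dp_hpd_plug_handle(),
 *                                                 delayed 3 event ticks
 *
 * and the dedicated replug case in hpd_event_thread() can be dropped,
 * as done earlier in this patch.
 */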
@@ -1285,12 +1274,15 @@ static int dp_pm_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct msm_dp *dp_display = platform_get_drvdata(pdev);
struct dp_display_private *dp;
- u32 status;
+ int sink_count = 0;
dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->event_mutex);
+ DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+ dp->core_initialized, dp_display->power_on);
+
/* start from disconnected state */
dp->hpd_state = ST_DISCONNECTED;
@@ -1299,18 +1291,33 @@ static int dp_pm_resume(struct device *dev)
dp_catalog_ctrl_hpd_config(dp->catalog);
- status = dp_catalog_link_is_connected(dp->catalog);
+ /*
+ * set sink to normal operation mode -- D0
+ * before dpcd read
+ */
+ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+
+ if (dp_catalog_link_is_connected(dp->catalog)) {
+ sink_count = drm_dp_read_sink_count(dp->aux);
+ if (sink_count < 0)
+ sink_count = 0;
+ }
+ dp->link->sink_count = sink_count;
/*
* cannot declare the display connected unless the
* HDMI cable is plugged into the dongle and its
* sink_count becomes 1
*/
- if (status && dp->link->sink_count)
+ if (dp->link->sink_count)
dp->dp_display.is_connected = true;
else
dp->dp_display.is_connected = false;
+ DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
+ dp->link->sink_count, dp->dp_display.is_connected,
+ dp->core_initialized, dp_display->power_on);
+
mutex_unlock(&dp->event_mutex);
return 0;
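/*
 * Illustrative DPCD view of the resume path above (standard DisplayPort
 * register offsets, not introduced by this patch): dp_link_psm_config()
 * with enable == false writes DP_SET_POWER_D0 (0x1) to DPCD 0x600, so
 * the sink leaves power-save before any DPCD access, and
 * drm_dp_read_sink_count() then fetches DP_SINK_COUNT (DPCD 0x200). The
 * helper returns the count or a negative errno, which the code above
 * clamps to 0 so a failed read is treated as "no sink".
 */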
@@ -1326,6 +1333,9 @@ static int dp_pm_suspend(struct device *dev)
mutex_lock(&dp->event_mutex);
+ DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+ dp->core_initialized, dp_display->power_on);
+
if (dp->core_initialized == true) {
/* mainlink enabled */
if (dp_power_clk_status(dp->power, DP_CTRL_PM))
@@ -1339,6 +1349,9 @@ static int dp_pm_suspend(struct device *dev)
/* host_init will be called at pm_resume */
dp->core_initialized = false;
+ DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
+ dp->core_initialized, dp_display->power_on);
+
mutex_unlock(&dp->event_mutex);
return 0;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1195044a7a3b..a5bdfc5029de 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -1027,43 +1027,29 @@ int dp_link_process_request(struct dp_link *dp_link)
if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
- return ret;
- }
-
- ret = dp_link_process_ds_port_status_change(link);
- if (!ret) {
+ } else if (!dp_link_process_ds_port_status_change(link)) {
dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
- return ret;
- }
-
- ret = dp_link_process_link_training_request(link);
- if (!ret) {
+ } else if (!dp_link_process_link_training_request(link)) {
dp_link->sink_request |= DP_TEST_LINK_TRAINING;
- return ret;
- }
-
- ret = dp_link_process_phy_test_pattern_request(link);
- if (!ret) {
+ } else if (!dp_link_process_phy_test_pattern_request(link)) {
dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
- return ret;
- }
-
- ret = dp_link_process_link_status_update(link);
- if (!ret) {
- dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
- return ret;
- }
-
- if (dp_link_is_video_pattern_requested(link)) {
- ret = 0;
- dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
- }
-
- if (dp_link_is_audio_pattern_requested(link)) {
- dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
- return -EINVAL;
+ } else {
+ ret = dp_link_process_link_status_update(link);
+ if (!ret) {
+ dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+ } else {
+ if (dp_link_is_video_pattern_requested(link)) {
+ ret = 0;
+ dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+ }
+ if (dp_link_is_audio_pattern_requested(link)) {
+ dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+ ret = -EINVAL;
+ }
+ }
}
+ DRM_DEBUG_DP("sink request=%#x", dp_link->sink_request);
return ret;
}
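/*
 * Editorial summary of the refactor above: sink requests are now
 * evaluated in a single if/else-if cascade, so at most one of the
 * mutually exclusive requests is latched per call, in this priority
 * order:
 *
 *   1. DP_TEST_LINK_EDID_READ
 *   2. DS_PORT_STATUS_CHANGED
 *   3. DP_TEST_LINK_TRAINING
 *   4. DP_TEST_LINK_PHY_TEST_PATTERN
 *   5. DP_LINK_STATUS_UPDATED
 *   6. DP_TEST_LINK_VIDEO_PATTERN and/or DP_TEST_LINK_AUDIO_PATTERN
 *
 * Behaviour matches the old early-return chain, except that every path
 * now falls through to the single DRM_DEBUG_DP() trace at the end.
 */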
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 440b32753430..2181b60e1d1d 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -271,7 +271,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid)
{
struct edid *last_block;
u8 *raw_edid;
- bool is_edid_corrupt;
+ bool is_edid_corrupt = false;
if (!edid) {
DRM_ERROR("invalid edid input\n");
@@ -303,7 +303,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
- u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+ u8 checksum;
+
+ if (dp_panel->edid)
+ checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+ else
+ checksum = dp_panel->connector->real_edid_checksum;
dp_link_send_edid_checksum(panel->link, checksum);
dp_link_send_test_response(panel->link);
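/*
 * Background note: connector->real_edid_checksum is recorded by the DRM
 * core (see connector_bad_edid() in drm_edid.c) as the checksum it
 * actually computed when an EDID block failed validation. Falling back
 * to it here lets the driver answer the sink's EDID-read test request
 * with a meaningful value even though dp_panel->edid ended up NULL.
 */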
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 3961ba4efc3c..b48b45e92bfa 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -208,6 +208,9 @@ static int dp_power_clk_set_rate(struct dp_power_private *power,
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
{
+ DRM_DEBUG_DP("core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
+ dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
+
if (pm_type == DP_CORE_PM)
return dp_power->core_clks_on;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 75afc12a7b25..614dc7f26f2c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -13,6 +13,13 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
return msm_dsi->encoder;
}
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+ unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
+
+ return !(host_flags & MIPI_DSI_MODE_VIDEO);
+}
+
static int dsi_get_phy(struct msm_dsi *msm_dsi)
{
struct platform_device *pdev = msm_dsi->pdev;
@@ -26,8 +33,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
}
phy_pdev = of_find_device_by_node(phy_node);
- if (phy_pdev)
+ if (phy_pdev) {
msm_dsi->phy = platform_get_drvdata(phy_pdev);
+ msm_dsi->phy_dev = &phy_pdev->dev;
+ }
of_node_put(phy_node);
@@ -36,8 +45,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
return -EPROBE_DEFER;
}
- msm_dsi->phy_dev = get_device(&phy_pdev->dev);
-
return 0;
}
@@ -244,8 +251,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- msm_dsi_manager_setup_encoder(msm_dsi->id);
-
priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
priv->connectors[priv->num_connectors++] = msm_dsi->connector;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 9b8e9b07eced..b50db91cb8a7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -80,10 +80,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
-void msm_dsi_manager_setup_encoder(int id);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
bool msm_dsi_manager_validate_current_config(u8 id);
+void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -109,7 +109,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings,
- bool is_dual_dsi);
+ bool is_bonded_dsi, struct msm_dsi_phy *phy);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
@@ -123,7 +123,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req,
- bool is_dual_dsi);
+ bool is_bonded_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
@@ -145,9 +145,11 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
-int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
-int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
+void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
+
/* dsi phy */
struct msm_dsi_phy;
struct msm_dsi_phy_shared_timings {
@@ -164,10 +166,9 @@ struct msm_dsi_phy_clk_request {
void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void);
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_clk_request *clk_req);
+ struct msm_dsi_phy_clk_request *clk_req,
+ struct msm_dsi_phy_shared_timings *shared_timings);
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
-void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_shared_timings *shared_timing);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc);
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
@@ -175,6 +176,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
+bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable);
#endif /* __DSI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index eadbcc78fd72..49b551ad1bff 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -105,6 +105,32 @@ enum dsi_lane_swap {
LANE_SWAP_3210 = 7,
};
+enum video_config_bpp {
+ VIDEO_CONFIG_18BPP = 0,
+ VIDEO_CONFIG_24BPP = 1,
+};
+
+enum video_pattern_sel {
+ VID_PRBS = 0,
+ VID_INCREMENTAL = 1,
+ VID_FIXED = 2,
+ VID_MDSS_GENERAL_PATTERN = 3,
+};
+
+enum cmd_mdp_stream0_pattern_sel {
+ CMD_MDP_PRBS = 0,
+ CMD_MDP_INCREMENTAL = 1,
+ CMD_MDP_FIXED = 2,
+ CMD_MDP_MDSS_GENERAL_PATTERN = 3,
+};
+
+enum cmd_dma_pattern_sel {
+ CMD_DMA_PRBS = 0,
+ CMD_DMA_INCREMENTAL = 1,
+ CMD_DMA_FIXED = 2,
+ CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3,
+};
+
#define DSI_IRQ_CMD_DMA_DONE 0x00000001
#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
#define DSI_IRQ_CMD_MDP_DONE 0x00000100
@@ -518,6 +544,7 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000
#define REG_DSI_LANE_CTRL 0x000000a8
+#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000
#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000
#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
@@ -564,6 +591,53 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
#define REG_DSI_PHY_RESET 0x00000128
#define DSI_PHY_RESET_RESET 0x00000001
+#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160
+
+#define REG_DSI_TPG_MAIN_CONTROL 0x00000198
+#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100
+
+#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0
+#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003
+#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0
+static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val)
+{
+ return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK;
+}
+#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004
+
+#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030
+#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002
+#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001
+
+#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168
+
+#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001
+
+#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000
+
#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index f3f1c03c7db9..96bbc8b6d009 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -32,9 +32,8 @@ static const char * const dsi_6g_bus_clk_names[] = {
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 4,
+ .num = 3,
.regs = {
- {"gdsc", -1, -1},
{"vdd", 150000, 100}, /* 3.0 V */
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
@@ -53,9 +52,8 @@ static const char * const dsi_8916_bus_clk_names[] = {
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 3,
+ .num = 2,
.regs = {
- {"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
@@ -73,9 +71,8 @@ static const char * const dsi_8976_bus_clk_names[] = {
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 3,
+ .num = 2,
.regs = {
- {"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
@@ -89,9 +86,8 @@ static const struct msm_dsi_config msm8976_dsi_cfg = {
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 7,
+ .num = 6,
.regs = {
- {"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.25 V */
{"vddio", 100000, 100}, /* 1.8 V */
{"vcca", 10000, 100}, /* 1.0 V */
@@ -154,7 +150,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = {
.reg_cfg = {
- .num = 2,
+ .num = 1,
.regs = {
- {"vdd", 73400, 32 }, /* 0.9 V */
{"vdda", 12560, 4 }, /* 1.2 V */
},
},
@@ -200,6 +195,24 @@ static const struct msm_dsi_config sc7180_dsi_cfg = {
.num_dsi = 1,
};
+static const char * const dsi_sc7280_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct msm_dsi_config sc7280_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 8350, 0 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sc7280_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
+ .io_start = { 0xae94000 },
+ .num_dsi = 1,
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
@@ -267,6 +280,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
+ &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index ade9b609c7d9..41e99a9fb5de 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -24,6 +24,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
+#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
@@ -47,7 +48,7 @@ struct msm_dsi_host_cfg_ops {
void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
void (*tx_buf_put)(struct msm_dsi_host *msm_host);
int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
- int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
};
struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index b466a4af7c3e..e269df285136 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -27,6 +27,7 @@
#include "dsi_cfg.h"
#include "msm_kms.h"
#include "msm_gem.h"
+#include "phy/dsi_phy.h"
#define DSI_RESET_TOGGLE_DELAY_MS 20
@@ -167,6 +168,9 @@ struct msm_dsi_host {
int dlane_swap;
int num_data_lanes;
+ /* from phy DT */
+ bool cphy_mode;
+
u32 dma_cmd_ctrl_restore;
bool registered;
@@ -203,35 +207,22 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
- struct regulator *gdsc_reg;
struct clk *ahb_clk;
int ret;
u32 major = 0, minor = 0;
- gdsc_reg = regulator_get(dev, "gdsc");
- if (IS_ERR(gdsc_reg)) {
- pr_err("%s: cannot get gdsc\n", __func__);
- goto exit;
- }
-
ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);
- goto put_gdsc;
+ goto exit;
}
pm_runtime_get_sync(dev);
- ret = regulator_enable(gdsc_reg);
- if (ret) {
- pr_err("%s: unable to enable gdsc\n", __func__);
- goto put_gdsc;
- }
-
ret = clk_prepare_enable(ahb_clk);
if (ret) {
pr_err("%s: unable to enable ahb_clk\n", __func__);
- goto disable_gdsc;
+ goto runtime_put;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
@@ -246,11 +237,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
disable_clks:
clk_disable_unprepare(ahb_clk);
-disable_gdsc:
- regulator_disable(gdsc_reg);
+runtime_put:
pm_runtime_put_sync(dev);
-put_gdsc:
- regulator_put(gdsc_reg);
exit:
return cfg_hnd;
}
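/*
 * Editorial note: the explicit "gdsc" supply is dropped here, and from
 * the per-SoC reg_cfg tables in dsi_cfg.c above, on the understanding
 * that the GDSC is modelled as a power domain attached to the device;
 * the pm_runtime_get_sync()/pm_runtime_put_sync() pair in this function
 * is what now powers the core up and down.
 */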
@@ -510,6 +498,7 @@ int msm_dsi_runtime_resume(struct device *dev)
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
+ u32 byte_intf_rate;
int ret;
DBG("Set clk rates: pclk=%d, byteclk=%d",
@@ -529,8 +518,13 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
}
if (msm_host->byte_intf_clk) {
- ret = clk_set_rate(msm_host->byte_intf_clk,
- msm_host->byte_clk_rate / 2);
+ /* For CPHY, byte_intf_clk is the same as byte_clk */
+ if (msm_host->cphy_mode)
+ byte_intf_rate = msm_host->byte_clk_rate;
+ else
+ byte_intf_rate = msm_host->byte_clk_rate / 2;
+
+ ret = clk_set_rate(msm_host->byte_intf_clk, byte_intf_rate);
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
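/*
 * Worked example for the rate selection above (illustrative numbers):
 * on D-PHY a byte_clk_rate of 111,375,000 Hz gives a byte_intf_clk of
 * 55,687,500 Hz (byte_clk / 2); in C-PHY mode byte_intf_clk tracks
 * byte_clk one-to-one, so a 55,687,500 Hz byte clock requests the same
 * 55,687,500 Hz on the interface clock.
 */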
@@ -679,7 +673,7 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->byte_clk);
}
-static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 pclk_rate;
@@ -687,22 +681,22 @@ static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
pclk_rate = mode->clock * 1000;
/*
- * For dual DSI mode, the current DRM mode has the complete width of the
+ * For bonded DSI mode, the current DRM mode has the complete width of the
* panel. Since, the complete panel is driven by two DSI controllers,
* the clock rates have to be split between the two dsi controllers.
* Adjust the byte and pixel clock rates for each dsi host accordingly.
*/
- if (is_dual_dsi)
+ if (is_bonded_dsi)
pclk_rate /= 2;
return pclk_rate;
}
-static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+ u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
u64 pclk_bpp = (u64)pclk_rate * bpp;
if (lanes == 0) {
@@ -710,7 +704,11 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
lanes = 1;
}
- do_div(pclk_bpp, (8 * lanes));
+ /* CPHY "byte_clk" is in units of 16 bits */
+ if (msm_host->cphy_mode)
+ do_div(pclk_bpp, (16 * lanes));
+ else
+ do_div(pclk_bpp, (8 * lanes));
msm_host->pixel_clk_rate = pclk_rate;
msm_host->byte_clk_rate = pclk_bpp;
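/*
 * Worked example for dsi_calc_pclk() (illustrative 1080p60 numbers,
 * RGB888, 4 lanes): pclk_rate = 148,500 kHz * 1000 = 148,500,000 Hz,
 * bpp = 24, so
 *
 *   D-PHY: byte_clk = 148500000 * 24 / (8 * 4)  = 111,375,000 Hz
 *   C-PHY: byte_clk = 148500000 * 24 / (16 * 4) =  55,687,500 Hz
 *
 * i.e. the C-PHY "byte" clock ticks once per 16-bit word, half the
 * D-PHY byte rate for the same pixel stream.
 */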
@@ -720,28 +718,28 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
}
-int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
if (!msm_host->mode) {
pr_err("%s: mode not set\n", __func__);
return -EINVAL;
}
- dsi_calc_pclk(msm_host, is_dual_dsi);
+ dsi_calc_pclk(msm_host, is_bonded_dsi);
msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
return 0;
}
-int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
u32 bpp = dsi_get_bpp(msm_host->format);
u64 pclk_bpp;
unsigned int esc_mhz, esc_div;
unsigned long byte_mhz;
- dsi_calc_pclk(msm_host, is_dual_dsi);
+ dsi_calc_pclk(msm_host, is_bonded_dsi);
- pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_bonded_dsi) * bpp;
do_div(pclk_bpp, 8);
msm_host->src_clk_rate = pclk_bpp;
@@ -834,7 +832,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
}
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
- struct msm_dsi_phy_shared_timings *phy_shared_timings)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy)
{
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
@@ -929,6 +927,10 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
+
+ if (msm_dsi_phy_set_continuous_clock(phy, enable))
+ lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY;
+
dsi_write(msm_host, REG_DSI_LANE_CTRL,
lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
}
@@ -936,9 +938,12 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data |= DSI_CTRL_ENABLE;
dsi_write(msm_host, REG_DSI_CTRL, data);
+
+ if (msm_host->cphy_mode)
+ dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
-static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -956,13 +961,13 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
DBG("");
/*
- * For dual DSI mode, the current DRM mode has
+ * For bonded DSI mode, the current DRM mode has
* the complete width of the panel. Since, the complete
* panel is driven by two DSI controllers, the horizontal
* timings have to be split between the two dsi controllers.
* Adjust the DSI host timing values accordingly.
*/
- if (is_dual_dsi) {
+ if (is_bonded_dsi) {
h_total /= 2;
hs_end /= 2;
ha_start /= 2;
@@ -2226,6 +2231,8 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
+ msm_host->cphy_mode = src_phy->cphy_mode;
+
ret = msm_dsi_phy_get_clk_provider(src_phy,
&byte_clk_provider, &pixel_clk_provider);
if (ret) {
@@ -2285,19 +2292,26 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req,
- bool is_dual_dsi)
+ bool is_bonded_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
}
- clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
+ /*
+ * CPHY transmits 16 bits over 7 clock cycles;
+ * "byte_clk" is in units of 16 bits (see dsi_calc_pclk),
+ * so multiply by 7 to get the "bitclk" rate.
+ */
+ if (msm_host->cphy_mode)
+ clk_req->bitclk_rate = msm_host->byte_clk_rate * 7;
+ else
+ clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
}
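/*
 * Continuing the worked example: with a C-PHY byte_clk_rate of
 * 55,687,500 Hz, bitclk_rate = 55,687,500 * 7 = 389,812,500 (the
 * per-lane symbol rate, since 16 bits travel in 7 symbol clocks); the
 * D-PHY equivalent is 111,375,000 * 8 = 891,000,000 bit/s per lane.
 * Both deliver the same 3.564 Gbit/s of pixel data over 4 lanes once
 * C-PHY's 16/7 bits-per-symbol encoding is accounted for.
 */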
@@ -2354,7 +2368,7 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings,
- bool is_dual_dsi)
+ bool is_bonded_dsi, struct msm_dsi_phy *phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
@@ -2392,9 +2406,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto fail_disable_clk;
}
- dsi_timing_setup(msm_host, is_dual_dsi);
+ dsi_timing_setup(msm_host, is_bonded_dsi);
dsi_sw_reset(msm_host);
- dsi_ctrl_config(msm_host, true, phy_shared_timings);
+ dsi_ctrl_config(msm_host, true, phy_shared_timings, phy);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 1);
@@ -2425,7 +2439,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
goto unlock_ret;
}
- dsi_ctrl_config(msm_host, false, NULL);
+ dsi_ctrl_config(msm_host, false, NULL, NULL);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0);
@@ -2495,3 +2509,64 @@ void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_ho
pm_runtime_put_sync(&msm_host->pdev->dev);
}
+
+static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host)
+{
+ u32 reg;
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff);
+ /* draw checkered rectangle pattern */
+ dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL,
+ DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN);
+ /* use 24-bit RGB test pattern */
+ dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG,
+ DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) |
+ DSI_TPG_VIDEO_CONFIG_RGB);
+
+ reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN);
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ DBG("Video test pattern setup done\n");
+}
+
+static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host)
+{
+ u32 reg;
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+
+ /* initial value for test pattern */
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff);
+
+ reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN);
+
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
+ /* draw checkered rectangle pattern */
+ dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2,
+ DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN);
+
+ DBG("Cmd test pattern setup done\n");
+}
+
+void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO);
+ u32 reg;
+
+ if (is_video_mode)
+ msm_dsi_host_video_test_pattern_setup(msm_host);
+ else
+ msm_dsi_host_cmd_test_pattern_setup(msm_host);
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+ /* enable the test pattern generator */
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN));
+
+ /* for command mode, we need to trigger one frame from the TPG */
+ if (!is_video_mode)
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER,
+ DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
+}
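/*
 * Usage sketch (hypothetical, not part of this patch): the TPG entry
 * point above is intended to be exercised from a debug interface. A
 * minimal debugfs trigger, assuming the file is wired up with
 * DEFINE_DEBUGFS_ATTRIBUTE() and debugfs_create_file(), and using the
 * msm_dsi_manager_tpg_enable() helper added in dsi_manager.c by this
 * series, could look like:
 */
static int dsi_tpg_set(void *data, u64 val)
{
	/* hypothetical debugfs setter: any non-zero write starts the TPG */
	if (val)
		msm_dsi_manager_tpg_enable();

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(dsi_tpg_fops, NULL, dsi_tpg_set, "%llu\n");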
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4ebfedc4a9ac..c41d39f5b7cf 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -21,14 +21,14 @@
struct msm_dsi_manager {
struct msm_dsi *dsi[DSI_MAX];
- bool is_dual_dsi;
+ bool is_bonded_dsi;
bool is_sync_needed;
int master_dsi_link_id;
};
static struct msm_dsi_manager msm_dsim_glb;
-#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi)
+#define IS_BONDED_DSI() (msm_dsim_glb.is_bonded_dsi)
#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id)
@@ -42,18 +42,17 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
}
-static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
+static int dsi_mgr_parse_of(struct device_node *np, int id)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
- /* We assume 2 dsi nodes have the same information of dual-dsi and
- * sync-mode, and only one node specifies master in case of dual mode.
+ /* We assume the 2 dsi nodes carry the same bonded-dsi and sync-mode
+ * information, and that only one node specifies the master in bonded mode.
*/
- if (!msm_dsim->is_dual_dsi)
- msm_dsim->is_dual_dsi = of_property_read_bool(
- np, "qcom,dual-dsi-mode");
+ if (!msm_dsim->is_bonded_dsi)
+ msm_dsim->is_bonded_dsi = of_property_read_bool(np, "qcom,dual-dsi-mode");
- if (msm_dsim->is_dual_dsi) {
+ if (msm_dsim->is_bonded_dsi) {
if (of_property_read_bool(np, "qcom,master-dsi"))
msm_dsim->master_dsi_link_id = id;
if (!msm_dsim->is_sync_needed)
@@ -72,7 +71,7 @@ static int dsi_mgr_setup_components(int id)
struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int ret;
- if (!IS_DUAL_DSI()) {
+ if (!IS_BONDED_DSI()) {
ret = msm_dsi_host_register(msm_dsi->host, true);
if (ret)
return ret;
@@ -100,7 +99,7 @@ static int dsi_mgr_setup_components(int id)
if (ret)
return ret;
- /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */
+ /* PLL0 drives both DSI link clocks in bonded DSI mode. */
msm_dsi_phy_set_usecase(clk_master_dsi->phy,
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
@@ -119,12 +118,11 @@ static int enable_phy(struct msm_dsi *msm_dsi,
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
- msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
- ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req);
- msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
+ ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
return ret;
}
@@ -138,12 +136,12 @@ dsi_mgr_phy_enable(int id,
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int ret;
- /* In case of dual DSI, some registers in PHY1 have been programmed
+ /* In case of bonded DSI, some registers in PHY1 have been programmed
* during PLL0 clock's set_rate. The PHY1 reset called by host1 here
* will silently reset those PHY1 registers. Therefore we need to reset
* and enable both PHYs before any PLL clock operation.
*/
- if (IS_DUAL_DSI() && mdsi && sdsi) {
+ if (IS_BONDED_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_host_reset_phy(mdsi->host);
msm_dsi_host_reset_phy(sdsi->host);
@@ -178,11 +176,11 @@ static void dsi_mgr_phy_disable(int id)
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
/* disable DSI phy
- * In dual-dsi configuration, the phy should be disabled for the
+ * In bonded dsi configuration, the phy should be disabled for the
* first controller only when the second controller is disabled.
*/
msm_dsi->phy_enabled = false;
- if (IS_DUAL_DSI() && mdsi && sdsi) {
+ if (IS_BONDED_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_phy_disable(sdsi->phy);
msm_dsi_phy_disable(mdsi->phy);
@@ -217,24 +215,6 @@ static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
return dsi_bridge->id;
}
-static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi)
-{
- unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
- return !(host_flags & MIPI_DSI_MODE_VIDEO);
-}
-
-void msm_dsi_manager_setup_encoder(int id)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct msm_drm_private *priv = msm_dsi->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
-
- if (encoder && kms->funcs->set_encoder_mode)
- kms->funcs->set_encoder_mode(kms, encoder,
- dsi_mgr_is_cmd_mode(msm_dsi));
-}
-
static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
{
struct msm_drm_private *priv = conn->dev->dev_private;
@@ -244,7 +224,7 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
struct msm_dsi *master_dsi, *slave_dsi;
struct drm_panel *panel;
- if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) {
+ if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
slave_dsi = msm_dsi;
} else {
@@ -253,7 +233,7 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
}
/*
- * There is only 1 panel in the global panel list for dual DSI mode.
+ * There is only 1 panel in the global panel list for bonded DSI mode.
* Therefore slave dsi should get the drm_panel instance from master
* dsi.
*/
@@ -264,20 +244,20 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
return PTR_ERR(panel);
}
- if (!panel || !IS_DUAL_DSI())
+ if (!panel || !IS_BONDED_DSI())
goto out;
drm_object_attach_property(&conn->base,
conn->dev->mode_config.tile_property, 0);
/*
- * Set split display info to kms once dual DSI panel is connected to
+ * Set split display info to kms once bonded DSI panel is connected to
* both hosts.
*/
if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
- dsi_mgr_is_cmd_mode(msm_dsi));
+ msm_dsi_is_cmd_mode(msm_dsi));
}
out:
@@ -317,7 +297,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
return 0;
/*
- * In dual DSI mode, we have one connector that can be
+ * In bonded DSI mode, we have one connector that can be
* attached to the drm_panel.
*/
num = drm_panel_get_modes(panel, connector);
@@ -366,30 +346,30 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
- /* Do nothing with the host if it is slave-DSI in case of dual DSI */
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
ret = dsi_mgr_phy_enable(id, phy_shared_timings);
if (ret)
goto phy_en_fail;
- ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_bonded_dsi, msm_dsi->phy);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
}
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
- &phy_shared_timings[DSI_1], is_dual_dsi);
+ &phy_shared_timings[DSI_1], is_bonded_dsi, msm_dsi1->phy);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -415,7 +395,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
goto host_en_fail;
}
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_enable(msm_dsi1->host);
if (ret) {
pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -431,7 +411,7 @@ host_en_fail:
if (panel)
drm_panel_unprepare(panel);
panel_prep_fail:
- if (is_dual_dsi && msm_dsi1)
+ if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
msm_dsi_host_power_off(host);
@@ -441,20 +421,33 @@ phy_en_fail:
return;
}
+void msm_dsi_manager_tpg_enable(void)
+{
+ struct msm_dsi *m_dsi = dsi_mgr_get_dsi(DSI_0);
+ struct msm_dsi *s_dsi = dsi_mgr_get_dsi(DSI_1);
+
+ /* if bonded DSI, trigger the TPG on the master first, then the slave */
+ if (m_dsi) {
+ msm_dsi_host_test_pattern_en(m_dsi->host);
+ if (IS_BONDED_DSI() && s_dsi)
+ msm_dsi_host_test_pattern_en(s_dsi->host);
+ }
+}
+
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
- /* Do nothing with the host if it is slave-DSI in case of dual DSI */
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
@@ -471,15 +464,15 @@ static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
- /* Do nothing with the host if it is slave-DSI in case of dual DSI */
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
@@ -497,7 +490,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
@@ -506,18 +499,18 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
return;
/*
- * Do nothing with the host if it is slave-DSI in case of dual DSI.
+ * Do nothing with the host if it is slave-DSI in case of bonded DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
* won't be disabled until both PHYs request disable.
*/
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_disable(msm_dsi1->host);
if (ret)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
@@ -537,7 +530,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_power_off(msm_dsi1->host);
if (ret)
pr_err("%s: host1 power off failed, %d\n",
@@ -556,15 +549,15 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct mipi_dsi_host *host = msm_dsi->host;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
- if (is_dual_dsi && other_dsi)
+ if (is_bonded_dsi && other_dsi)
msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
}
@@ -640,15 +633,15 @@ fail:
bool msm_dsi_manager_validate_current_config(u8 id)
{
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
/*
- * For dual DSI, we only have one drm panel. For this
+ * For bonded DSI, we only have one drm panel. For this
* use case, we register only one bridge/connector.
* Skip bridge/connector initialisation if it is
- * slave-DSI for dual DSI configuration.
+ * slave-DSI for bonded DSI configuration.
*/
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) {
DBG("Skip bridge registration for slave DSI->id: %d\n", id);
return false;
}
@@ -740,7 +733,7 @@ int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
if (!msg->tx_buf || !msg->tx_len)
return 0;
- /* In dual master case, panel requires the same commands sent to
+ /* In bonded master case, panel requires the same commands sent to
* both DSI links. Host issues the command trigger to both links
* when DSI_1 calls the cmd transfer function, no matter it happens
* before or after DSI_0 cmd transfer.
@@ -809,9 +802,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
msm_dsim->dsi[id] = msm_dsi;
- ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
+ ret = dsi_mgr_parse_of(msm_dsi->pdev->dev.of_node, id);
if (ret) {
- pr_err("%s: failed to parse dual DSI info\n", __func__);
+ pr_err("%s: failed to parse OF DSI info\n", __func__);
goto fail;
}
@@ -840,3 +833,12 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
msm_dsim->dsi[msm_dsi->id] = NULL;
}
+bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
+{
+ return IS_BONDED_DSI();
+}
+
+bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
+{
+ return IS_MASTER_DSI_LINK(msm_dsi->id);
+}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 6ca6bfd4809b..8c65ef6968ca 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h"
@@ -461,6 +462,51 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x7;
+ s32 tmax, tmin;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x7 = ui * 7;
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x7;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);
+
+ tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
+ tmax = 255;
+ timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);
+
+ tmin = 1;
+ tmax = 32;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);
+
+ tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
+ tmax = 64;
+ timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);
+
+ DBG("%d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->clk_prepare, timing->hs_exit, timing->hs_rqst);
+
+ return 0;
+}
+
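/*
 * Units note for msm_dsi_cphy_timing_calc_v4() above (editorial): with
 * coeff = 1000, ui comes out in picoseconds (NSEC_PER_MSEC * coeff
 * divided by the bit rate in kHz) and ui_x7 is the duration of one
 * 7-UI C-PHY symbol word. The 38/95, 50, 100 and 262 constants are
 * nanosecond limits scaled by the same coeff. For example, at
 * bitclk_rate = 389,812,500 Hz: ui = 2565 ps, ui_x7 = 17,955 ps, so
 * clk_prepare interpolates between tmin = S_DIV_ROUND_UP(38000, 17955) = 3
 * and tmax = 95000 / 17955 = 5.
 */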
static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
struct regulator_bulk_data *s = phy->supplies;
@@ -593,6 +639,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sc7280-dsi-phy-7nm",
+ .data = &dsi_phy_7nm_7280_cfgs },
#endif
{}
};
@@ -625,17 +673,13 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
+ u32 phy_type;
int ret;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- match = of_match_node(dsi_phy_dt_match, dev->of_node);
- if (!match)
- return -ENODEV;
-
phy->provided_clocks = devm_kzalloc(dev,
struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
GFP_KERNEL);
@@ -644,7 +688,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->provided_clocks->num = NUM_PROVIDED_CLKS;
- phy->cfg = match->data;
+ phy->cfg = of_device_get_match_data(&pdev->dev);
+ if (!phy->cfg)
+ return -ENODEV;
+
phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy);
@@ -657,6 +704,8 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
+ if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
+ phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size);
if (IS_ERR(phy->base)) {
@@ -754,7 +803,8 @@ void __exit msm_dsi_phy_driver_unregister(void)
}
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_clk_request *clk_req)
+ struct msm_dsi_phy_clk_request *clk_req,
+ struct msm_dsi_phy_shared_timings *shared_timings)
{
struct device *dev = &phy->pdev->dev;
int ret;
@@ -782,6 +832,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
goto phy_en_fail;
}
+ memcpy(shared_timings, &phy->timing.shared_timings,
+ sizeof(*shared_timings));
+
/*
* Resetting DSI PHY silently changes its PLL registers to reset status,
* which will confuse clock driver and result in wrong output rate of
@@ -821,13 +874,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
dsi_phy_disable_resource(phy);
}
-void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_shared_timings *shared_timings)
-{
- memcpy(shared_timings, &phy->timing.shared_timings,
- sizeof(*shared_timings));
-}
-
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc)
{
@@ -835,6 +881,15 @@ void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
phy->usecase = uc;
}
+/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */
+bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!phy || !phy->cfg->ops.set_continuous_clock)
+ return false;
+
+ return phy->cfg->ops.set_continuous_clock(phy, enable);
+}
+
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 5b0feef87127..b91303ada74f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -24,6 +24,7 @@ struct msm_dsi_phy_ops {
void (*disable)(struct msm_dsi_phy *phy);
void (*save_pll_state)(struct msm_dsi_phy *phy);
int (*restore_pll_state)(struct msm_dsi_phy *phy);
+ bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
};
struct msm_dsi_phy_cfg {
@@ -51,6 +52,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
@@ -99,6 +101,7 @@ struct msm_dsi_phy {
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
+ bool cphy_mode;
struct clk_hw *vco_hw;
bool pll_on;
@@ -119,5 +122,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
#endif /* __DSI_PHY_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index e46b10fc793a..d8128f50b0dd 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -84,7 +84,7 @@ struct dsi_pll_10nm {
#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index a34cf151c517..d13552b2213b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -86,7 +86,7 @@ struct dsi_pll_14nm {
/*
* Private struct for N1/N2 post-divider clocks. These clocks are similar to
* the generic clk_divider class of clocks. The only difference is that it
- * also sets the slave DSI PLL's post-dividers if in Dual DSI mode
+ * also sets the slave DSI PLL's post-dividers if in bonded DSI mode
*/
struct dsi_pll_14nm_postdiv {
struct clk_hw hw;
@@ -102,7 +102,7 @@ struct dsi_pll_14nm_postdiv {
#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
@@ -658,7 +658,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
val |= value << shift;
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
- /* If we're master in dual DSI mode, then the slave PLL's post-dividers
+ /* If we're master in bonded DSI mode, then the slave PLL's post-dividers
* follow the master's post dividers
*/
if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
@@ -1050,7 +1050,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
- {"vcca", 17000, 32},
+ {"vcca", 73400, 32},
},
},
.ops = {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 7c23d4c47338..cb297b08458e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -83,7 +83,7 @@ struct dsi_pll_7nm {
#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw)
/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
@@ -256,7 +256,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters);
}
@@ -642,7 +642,8 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ CLK_SET_RATE_PARENT, 1,
+ pll_7nm->phy->cphy_mode ? 7 : 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -663,32 +664,47 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 4);
+ if (pll_7nm->phy->cphy_mode)
+ hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7);
+ else
+ hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2, parent3, parent4
- }), 4, 0, pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 2, 0, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
- goto fail;
+ /*
+ * In CPHY mode, pclk_mux will always have post_out_div as its parent,
+ * so don't register a pclk_mux clock; just use post_out_div instead.
+ */
+ if (pll_7nm->phy->cphy_mode) {
+ u32 data;
+
+ data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
+
+ snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+ } else {
+ snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
+ snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+
+ hw = devm_clk_hw_register_mux(dev, clk_name,
+ ((const char *[]){
+ parent, parent2, parent3, parent4
+ }), 4, 0, pll_7nm->phy->base +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ 0, 2, 0, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
}
snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
hw = devm_clk_hw_register_divider(dev, clk_name, parent,
@@ -813,15 +829,21 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
- u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+ u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
+ u32 glbl_pemph_ctrl_0;
+ u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
u32 data;
DBG("");
- if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
+ if (phy->cphy_mode)
+ ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req);
+ else
+ ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
+ if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -842,6 +864,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Alter PHY configurations if the data rate is less than 1.5 GHz */
less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+ /* For C-PHY, no low power settings for lower clk rate */
+ if (phy->cphy_mode)
+ less_than_1500_mhz = false;
+
if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
@@ -856,6 +882,17 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x51;
+ vreg_ctrl_1 = 0x55;
+ glbl_pemph_ctrl_0 = 0x11;
+ lane_ctrl0 = 0x17;
+ } else {
+ vreg_ctrl_1 = 0x5c;
+ glbl_pemph_ctrl_0 = 0x00;
+ lane_ctrl0 = 0x1f;
+ }
+
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
@@ -876,15 +913,22 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6));
+
/* Enable LDO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
+
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
glbl_str_swi_cal_sel_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
glbl_hstx_str_ctrl_0);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0,
+ glbl_pemph_ctrl_0);
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
glbl_rescode_top_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
@@ -894,10 +938,11 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Remove power down from all blocks */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0);
/* Select full-rate mode */
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+ if (!phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
ret = dsi_7nm_set_usecase(phy);
if (ret) {
@@ -907,22 +952,36 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
}
/* DSI PHY timings */
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
- timing->shared_timings.clk_pre);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
- timing->shared_timings.clk_post);
+ if (phy->cphy_mode) {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
+ timing->shared_timings.clk_post);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ } else {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
+ timing->shared_timings.clk_post);
+ }
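For reference, the register-to-value mapping of the two timing branches above, summarized side by side (derived directly from the writes; TIMING_CTRL_1/2/3 and 12/13 are simply not programmed in C-PHY mode):

/*
 * TIMING_CTRL_n    C-PHY branch              D-PHY branch
 *  0               0x00                      0x00
 *  1..3            (not programmed)          clk_zero / clk_prepare / clk_trail
 *  4               hs_exit                   hs_exit
 *  5               shared_timings.clk_pre    hs_zero
 *  6               clk_prepare               hs_prepare
 *  7               shared_timings.clk_post   hs_trail
 *  8               hs_rqst                   hs_rqst
 *  9..11           0x02 / 0x04 / 0x00        0x02 / 0x04 / 0x00
 *  12..13          (not programmed)          shared_timings.clk_pre / clk_post
 */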
/* DSI lane settings */
dsi_phy_hw_v4_0_lane_settings(phy);
@@ -932,6 +991,21 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
return 0;
}
+static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
+ if (enable)
+ data |= BIT(5) | BIT(6);
+ else
+ data &= ~(BIT(5) | BIT(6));
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data);
+
+ return enable;
+}
+
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
@@ -972,6 +1046,7 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
@@ -998,9 +1073,36 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL,
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
+ .has_phy_lane = true,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 37550, 0},
+ },
+ },
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000ULL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a332b09a5a11..2e6fc185e54d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -603,6 +603,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (IS_ERR(priv->event_thread[i].worker)) {
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+ ret = PTR_ERR(priv->event_thread[i].worker);
goto err_msm_uninit;
}
@@ -1057,17 +1058,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .mmap = msm_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(fops);
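For reference, DEFINE_DRM_GEM_FOPS() expands to roughly the hand-rolled table it replaces (paraphrased from include/drm/drm_gem.h; treat this as an approximation for this kernel version). The one functional change is that .mmap becomes the generic drm_gem_mmap(), with the driver-specific part moving to drm_gem_object_funcs.mmap in msm_gem.c further below:

/* approximate expansion of DEFINE_DRM_GEM_FOPS(fops) */
static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,	/* was msm_gem_mmap */
};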
static const struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
@@ -1083,7 +1074,7 @@ static const struct drm_driver msm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_mmap = msm_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1a48a709ffb3..8b005d1ac899 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -309,7 +309,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -350,7 +349,9 @@ void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder);
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
-
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@@ -367,7 +368,18 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
}
-
+static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
+static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
+static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
#endif
#ifdef CONFIG_DRM_MSM_DP
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 67fae60f2fa5..0daaeb54ff6f 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -8,13 +8,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_prime.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
-extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
/*
@@ -48,15 +47,8 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
- int ret = 0;
- ret = drm_gem_mmap_obj(bo, bo->size, vma);
- if (ret) {
- pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
- return ret;
- }
-
- return msm_gem_mmap_obj(bo, vma);
+ return drm_gem_prime_mmap(bo, vma);
}
static int msm_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 5db07fc287ad..22308a1b66fc 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -217,31 +217,6 @@ static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
return prot;
}
-int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
-
- return 0;
-}
-
-int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret) {
- DBG("mmap failed: %d", ret);
- return ret;
- }
-
- return msm_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -817,8 +792,7 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
mutex_lock(&priv->mm_lock);
if (msm_obj->evictable)
mark_unevictable(msm_obj);
- list_del(&msm_obj->mm_list);
- list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+ list_move_tail(&msm_obj->mm_list, &gpu->active_list);
mutex_unlock(&priv->mm_lock);
}
}
@@ -1077,6 +1051,17 @@ void msm_gem_free_object(struct drm_gem_object *obj)
kfree(msm_obj);
}
+static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
+
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
@@ -1114,6 +1099,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
+ .mmap = msm_gem_object_mmap,
.vm_ops = &vm_ops,
};
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index f9e3ffb2309a..e39a8e7ad843 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -106,9 +106,6 @@ struct msm_gem_object {
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
-int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 9880348a4dc7..fc94e061d6a7 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -39,17 +39,6 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
msm_gem_put_vaddr(obj);
}
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- return msm_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9de7c42e1071..de2bc3467bb5 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -117,9 +117,6 @@ struct msm_kms_funcs {
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
- void (*set_encoder_mode)(struct msm_kms *kms,
- struct drm_encoder *encoder,
- bool cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ac8c3251b616..4f0fbf667431 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -785,7 +785,7 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
__free_page(rdev->dummy_page.page);
@@ -808,8 +808,8 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
{
if (rdev->dummy_page.page == NULL)
return;
- pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
}
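The radeon hunks above are a mechanical conversion from the deprecated pci_* DMA wrappers to the generic DMA API; the wrappers were thin aliases, so the correspondence is one-to-one (pdev being a struct pci_dev *):

/*
 * pci_map_page(pdev, page, off, size, PCI_DMA_BIDIRECTIONAL)
 *   -> dma_map_page(&pdev->dev, page, off, size, DMA_BIDIRECTIONAL)
 *
 * pci_unmap_page(pdev, addr, size, PCI_DMA_BIDIRECTIONAL)
 *   -> dma_unmap_page(&pdev->dev, addr, size, DMA_BIDIRECTIONAL)
 */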
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6640b7c947fe..ca382fbf7a86 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -168,6 +168,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
break;
case 2:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 5043dcaf1cf9..1650a448eabd 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,6 +9,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
+ select INTERCONNECT
select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
help
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index d6cf202414f0..d801909182cf 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -3,6 +3,9 @@ ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
tegra-drm-y := \
drm.o \
+ uapi.o \
+ submit.o \
+ firewall.o \
gem.o \
fb.o \
dp.o \
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 51bbbc42a144..16c7aabb94d3 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/iommu.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
@@ -618,9 +619,14 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
int err;
+ plane_state->peak_memory_bandwidth = 0;
+ plane_state->avg_memory_bandwidth = 0;
+
/* no need for further checks if the plane is being disabled */
- if (!new_plane_state->crtc)
+ if (!new_plane_state->crtc) {
+ plane_state->total_peak_memory_bandwidth = 0;
return 0;
+ }
err = tegra_plane_format(new_plane_state->fb->format->format,
&plane_state->format,
@@ -808,6 +814,12 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
formats = dc->soc->primary_formats;
modifiers = dc->soc->modifiers;
+ err = tegra_plane_interconnect_init(plane);
+ if (err) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
num_formats, modifiers, type, NULL);
@@ -845,12 +857,18 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
struct tegra_plane *tegra = to_tegra_plane(plane);
int err;
+ plane_state->peak_memory_bandwidth = 0;
+ plane_state->avg_memory_bandwidth = 0;
+
/* no need for further checks if the plane is being disabled */
- if (!new_plane_state->crtc)
+ if (!new_plane_state->crtc) {
+ plane_state->total_peak_memory_bandwidth = 0;
return 0;
+ }
/* scaling not supported for cursor */
if ((new_plane_state->src_w >> 16 != new_plane_state->crtc_w) ||
@@ -1030,6 +1048,12 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
if (!dc->soc->has_nvdisplay) {
num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats);
formats = tegra_legacy_cursor_plane_formats;
+
+ err = tegra_plane_interconnect_init(plane);
+ if (err) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
} else {
num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
formats = tegra_cursor_plane_formats;
@@ -1149,6 +1173,12 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
num_formats = dc->soc->num_overlay_formats;
formats = dc->soc->overlay_formats;
+ err = tegra_plane_interconnect_init(plane);
+ if (err) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
if (!cursor)
type = DRM_PLANE_TYPE_OVERLAY;
else
@@ -1572,6 +1602,11 @@ static int tegra_dc_show_stats(struct seq_file *s, void *data)
seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+ seq_printf(s, "frames total: %lu\n", dc->stats.frames_total);
+ seq_printf(s, "vblank total: %lu\n", dc->stats.vblank_total);
+ seq_printf(s, "underflow total: %lu\n", dc->stats.underflow_total);
+ seq_printf(s, "overflow total: %lu\n", dc->stats.overflow_total);
+
return 0;
}
@@ -1804,6 +1839,106 @@ static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
return -ETIMEDOUT;
}
+static void
+tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ bool prepare_bandwidth_transition)
+{
+ const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
+ const struct tegra_dc_state *old_dc_state, *new_dc_state;
+ u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
+ const struct drm_plane_state *old_plane_state;
+ const struct drm_crtc_state *old_crtc_state;
+ struct tegra_dc_window window, old_window;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_plane *tegra;
+ struct drm_plane *plane;
+
+ if (dc->soc->has_nvdisplay)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_dc_state = to_const_dc_state(old_crtc_state);
+ new_dc_state = to_const_dc_state(crtc->state);
+
+ if (!crtc->state->active) {
+ if (!old_crtc_state->active)
+ return;
+
+ /*
+ * When the CRTC is disabled via DPMS, the state of the attached
+ * planes is kept unchanged. Hence we need to enforce removal of
+ * the bandwidths from the ICC paths.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ tegra = to_tegra_plane(plane);
+
+ icc_set_bw(tegra->icc_mem, 0, 0);
+ icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
+ }
+
+ return;
+ }
+
+ for_each_old_plane_in_state(old_crtc_state->state, plane,
+ old_plane_state, i) {
+ old_tegra_state = to_const_tegra_plane_state(old_plane_state);
+ new_tegra_state = to_const_tegra_plane_state(plane->state);
+ tegra = to_tegra_plane(plane);
+
+ /*
+ * We're iterating over the global atomic state, which contains
+ * planes from other CRTCs, hence we need to filter out the
+ * planes unrelated to this CRTC.
+ */
+ if (tegra->dc != dc)
+ continue;
+
+ new_avg_bw = new_tegra_state->avg_memory_bandwidth;
+ old_avg_bw = old_tegra_state->avg_memory_bandwidth;
+
+ new_peak_bw = new_tegra_state->total_peak_memory_bandwidth;
+ old_peak_bw = old_tegra_state->total_peak_memory_bandwidth;
+
+ /*
+ * See the comment related to !crtc->state->active above,
+ * which explains why bandwidths need to be updated when
+ * CRTC is turning ON.
+ */
+ if (new_avg_bw == old_avg_bw && new_peak_bw == old_peak_bw &&
+ old_crtc_state->active)
+ continue;
+
+ window.src.h = drm_rect_height(&plane->state->src) >> 16;
+ window.dst.h = drm_rect_height(&plane->state->dst);
+
+ old_window.src.h = drm_rect_height(&old_plane_state->src) >> 16;
+ old_window.dst.h = drm_rect_height(&old_plane_state->dst);
+
+ /*
+ * During the preparation phase (atomic_begin), the memory
+ * freq should go high before the DC changes are committed
+ * if the bandwidth requirement goes up; otherwise the memory
+ * freq should stay high if the BW requirement goes down. The
+ * opposite applies to the completion phase (post_commit).
+ */
+ if (prepare_bandwidth_transition) {
+ new_avg_bw = max(old_avg_bw, new_avg_bw);
+ new_peak_bw = max(old_peak_bw, new_peak_bw);
+
+ if (tegra_plane_use_vertical_filtering(tegra, &old_window))
+ window = old_window;
+ }
+
+ icc_set_bw(tegra->icc_mem, new_avg_bw, new_peak_bw);
+
+ if (tegra_plane_use_vertical_filtering(tegra, &window))
+ icc_set_bw(tegra->icc_mem_vfilter, new_avg_bw, new_peak_bw);
+ else
+ icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
+ }
+}
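The prepare flag above implements a make-before-break policy for the ICC request: when the bandwidth requirement rises, the higher value is applied before the commit (atomic_begin); when it falls, the old value is held until after the commit (post_commit). A tiny standalone illustration of that arithmetic (made-up numbers, no kernel APIs):

#include <stdio.h>

/* prepare=1 models atomic_begin, prepare=0 models post_commit */
static unsigned int phase_bw(unsigned int old_bw, unsigned int new_bw,
			     int prepare)
{
	return prepare ? (old_bw > new_bw ? old_bw : new_bw) : new_bw;
}

int main(void)
{
	printf("up   100->300: prepare=%u, post=%u\n",
	       phase_bw(100, 300, 1), phase_bw(100, 300, 0));
	printf("down 300->100: prepare=%u, post=%u\n",
	       phase_bw(300, 100, 1), phase_bw(300, 100, 0));
	return 0;
}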
+
static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1985,6 +2120,8 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
{
unsigned long flags;
+ tegra_crtc_update_memory_bandwidth(crtc, state, true);
+
if (crtc->state->event) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -2017,7 +2154,207 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
}
+static bool tegra_plane_is_cursor(const struct drm_plane_state *state)
+{
+ const struct tegra_dc_soc_info *soc = to_tegra_dc(state->crtc)->soc;
+ const struct drm_format_info *fmt = state->fb->format;
+ unsigned int src_w = drm_rect_width(&state->src) >> 16;
+ unsigned int dst_w = drm_rect_width(&state->dst);
+
+ if (state->plane->type != DRM_PLANE_TYPE_CURSOR)
+ return false;
+
+ if (soc->supports_cursor)
+ return true;
+
+ if (src_w != dst_w || fmt->num_planes != 1 || src_w * fmt->cpp[0] > 256)
+ return false;
+
+ return true;
+}
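The size test above caps a non-hardware cursor at a 256-byte line; for instance, with a hypothetical 4-byte-per-pixel format the cursor can be at most 64 pixels wide. A trivial standalone check (illustrative only):

#include <stdio.h>

/* mirrors the "src_w * fmt->cpp[0] > 256" rejection above */
static int fits_cursor_line(unsigned int src_w, unsigned int cpp)
{
	return src_w * cpp <= 256;
}

int main(void)
{
	/* with a hypothetical 4-byte-per-pixel format, 64 px is the limit */
	printf("32px:%d 64px:%d 128px:%d\n",
	       fits_cursor_line(32, 4), fits_cursor_line(64, 4),
	       fits_cursor_line(128, 4));
	return 0;
}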
+
+static unsigned long
+tegra_plane_overlap_mask(struct drm_crtc_state *state,
+ const struct drm_plane_state *plane_state)
+{
+ const struct drm_plane_state *other_state;
+ const struct tegra_plane *tegra;
+ unsigned long overlap_mask = 0;
+ struct drm_plane *plane;
+ struct drm_rect rect;
+
+ if (!plane_state->visible || !plane_state->fb)
+ return 0;
+
+ /*
+ * The data-prefetch FIFO will easily help to overcome temporal memory
+ * pressure if another plane overlaps with the cursor plane.
+ */
+ if (tegra_plane_is_cursor(plane_state))
+ return 0;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, other_state, state) {
+ rect = plane_state->dst;
+
+ tegra = to_tegra_plane(other_state->plane);
+
+ if (!other_state->visible || !other_state->fb)
+ continue;
+
+ /*
+ * Ignore cursor plane overlaps because it's not practical to
+ * assume that it contributes to the bandwidth in the overlapping
+ * area if the window width is small.
+ */
+ if (tegra_plane_is_cursor(other_state))
+ continue;
+
+ if (drm_rect_intersect(&rect, &other_state->dst))
+ overlap_mask |= BIT(tegra->index);
+ }
+
+ return overlap_mask;
+}
+
+static int tegra_crtc_calculate_memory_bandwidth(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ ulong overlap_mask[TEGRA_DC_LEGACY_PLANES_NUM] = {}, mask;
+ u32 plane_peak_bw[TEGRA_DC_LEGACY_PLANES_NUM] = {};
+ bool all_planes_overlap_simultaneously = true;
+ const struct tegra_plane_state *tegra_state;
+ const struct drm_plane_state *plane_state;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ const struct drm_crtc_state *old_state;
+ struct drm_crtc_state *new_state;
+ struct tegra_plane *tegra;
+ struct drm_plane *plane;
+
+ /*
+ * The nv-display uses shared planes. The algorithm below assumes
+ * a maximum of 3 planes per CRTC; this assumption isn't applicable
+ * to the nv-display. Note that T124 has additional windows, but
+ * they currently aren't supported by the driver.
+ */
+ if (dc->soc->has_nvdisplay)
+ return 0;
+
+ new_state = drm_atomic_get_new_crtc_state(state, crtc);
+ old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * For overlapping planes, pixel data is fetched for each plane at
+ * the same time, hence the bandwidths accumulate in this case.
+ * This needs to be taken into account when calculating the total
+ * bandwidth consumed by all planes.
+ *
+ * Here we get the overlapping state of each plane, which is a
+ * bitmask of plane indices telling which planes it overlaps with.
+ * Note that bitmask[plane] includes BIT(plane) in order to make
+ * the further code nicer and simpler.
+ */
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
+ tegra_state = to_const_tegra_plane_state(plane_state);
+ tegra = to_tegra_plane(plane);
+
+ if (WARN_ON_ONCE(tegra->index >= TEGRA_DC_LEGACY_PLANES_NUM))
+ return -EINVAL;
+
+ plane_peak_bw[tegra->index] = tegra_state->peak_memory_bandwidth;
+ mask = tegra_plane_overlap_mask(new_state, plane_state);
+ overlap_mask[tegra->index] = mask;
+
+ if (hweight_long(mask) != 3)
+ all_planes_overlap_simultaneously = false;
+ }
+
+ /*
+ * Then we calculate the maximum bandwidth of each plane state.
+ * The bandwidth includes the plane BW + BW of the "simultaneously"
+ * overlapping planes, where "simultaneously" means areas where the
+ * DC fetches from the planes simultaneously during the scan-out
+ * process.
+ *
+ * For example, if plane A overlaps with planes B and C, but B and C
+ * don't overlap, then the peak bandwidth will be either in area where
+ * A-and-B or A-and-C planes overlap.
+ *
+ * The plane_peak_bw[] contains peak memory bandwidth values of
+ * each plane, this information is needed by interconnect provider
+ * in order to set up latency allowance based on the peak BW, see
+ * tegra_crtc_update_memory_bandwidth().
+ */
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
+ u32 i, old_peak_bw, new_peak_bw, overlap_bw = 0;
+
+ /*
+ * Note that the plane's atomic check doesn't touch the
+ * total_peak_memory_bandwidth of an enabled plane, hence the
+ * current state contains the old bandwidth state from the
+ * previous CRTC commit.
+ */
+ tegra_state = to_const_tegra_plane_state(plane_state);
+ tegra = to_tegra_plane(plane);
+
+ for_each_set_bit(i, &overlap_mask[tegra->index], 3) {
+ if (i == tegra->index)
+ continue;
+
+ if (all_planes_overlap_simultaneously)
+ overlap_bw += plane_peak_bw[i];
+ else
+ overlap_bw = max(overlap_bw, plane_peak_bw[i]);
+ }
+
+ new_peak_bw = plane_peak_bw[tegra->index] + overlap_bw;
+ old_peak_bw = tegra_state->total_peak_memory_bandwidth;
+
+ /*
+ * If the plane's peak bandwidth changed (for example the plane
+ * is no longer overlapped) and the plane isn't in the atomic
+ * state, then add the plane to the state in order to have its
+ * bandwidth updated.
+ */
+ if (old_peak_bw != new_peak_bw) {
+ struct tegra_plane_state *new_tegra_state;
+ struct drm_plane_state *new_plane_state;
+
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(new_plane_state))
+ return PTR_ERR(new_plane_state);
+
+ new_tegra_state = to_tegra_plane_state(new_plane_state);
+ new_tegra_state->total_peak_memory_bandwidth = new_peak_bw;
+ }
+ }
+
+ return 0;
+}
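To make the accumulation rule above concrete, assume hypothetical per-plane peaks A=100, B=200, C=300, where A overlaps B and C but B and C never overlap each other: A's total peak is 100 + max(200, 300) = 400, whereas a simultaneous three-way overlap would give 100 + 200 + 300 = 600. As standalone C:

#include <stdio.h>

int main(void)
{
	unsigned int peak_a = 100, peak_b = 200, peak_c = 300;	/* made up */

	/* B and C overlap A but not each other: peers contribute their max */
	unsigned int partial = peak_a + (peak_b > peak_c ? peak_b : peak_c);

	/* all three overlap in one area: peers contribute their sum */
	unsigned int simultaneous = peak_a + peak_b + peak_c;

	printf("A total peak: partial=%u, simultaneous=%u\n",
	       partial, simultaneous);	/* 400 vs 600 */
	return 0;
}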
+
+static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ int err;
+
+ err = tegra_crtc_calculate_memory_bandwidth(crtc, state);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void tegra_crtc_atomic_post_commit(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ /*
+ * Display bandwidth is allowed to go down only once the hardware
+ * state is known to be armed, i.e. the state was committed and a
+ * VBLANK event was received.
+ */
+ tegra_crtc_update_memory_bandwidth(crtc, state, false);
+}
+
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+ .atomic_check = tegra_crtc_atomic_check,
.atomic_begin = tegra_crtc_atomic_begin,
.atomic_flush = tegra_crtc_atomic_flush,
.atomic_enable = tegra_crtc_atomic_enable,
@@ -2036,6 +2373,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): frame end\n", __func__);
*/
+ dc->stats.frames_total++;
dc->stats.frames++;
}
@@ -2044,6 +2382,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_crtc_handle_vblank(&dc->base);
+ dc->stats.vblank_total++;
dc->stats.vblank++;
}
@@ -2051,6 +2390,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): underflow\n", __func__);
*/
+ dc->stats.underflow_total++;
dc->stats.underflow++;
}
@@ -2058,11 +2398,13 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): overflow\n", __func__);
*/
+ dc->stats.overflow_total++;
dc->stats.overflow++;
}
if (status & HEAD_UF_INT) {
dev_dbg_ratelimited(dc->dev, "%s(): head underflow\n", __func__);
+ dc->stats.underflow_total++;
dc->stats.underflow++;
}
@@ -2343,7 +2685,9 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.overlay_formats = tegra20_overlay_formats,
.modifiers = tegra20_modifiers,
.has_win_a_without_filters = true,
+ .has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = true,
+ .plane_tiled_memory_bandwidth_x2 = false,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -2363,7 +2707,9 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.overlay_formats = tegra20_overlay_formats,
.modifiers = tegra20_modifiers,
.has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = true,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -2383,7 +2729,9 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.overlay_formats = tegra114_overlay_formats,
.modifiers = tegra20_modifiers,
.has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = true,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -2403,7 +2751,9 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.overlay_formats = tegra124_overlay_formats,
.modifiers = tegra124_modifiers,
.has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = false,
};
static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2423,7 +2773,9 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
.overlay_formats = tegra114_overlay_formats,
.modifiers = tegra124_modifiers,
.has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = false,
};
static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
@@ -2473,6 +2825,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
.has_nvdisplay = true,
.wgrps = tegra186_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
+ .plane_tiled_memory_bandwidth_x2 = false,
};
static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = {
@@ -2522,6 +2875,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
.has_nvdisplay = true,
.wgrps = tegra194_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps),
+ .plane_tiled_memory_bandwidth_x2 = false,
};
static const struct of_device_id tegra_dc_of_match[] = {
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 5e13f1cfd749..f0cb691852a1 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -15,6 +15,8 @@
struct tegra_output;
+#define TEGRA_DC_LEGACY_PLANES_NUM 7
+
struct tegra_dc_state {
struct drm_crtc_state base;
@@ -33,11 +35,22 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
return NULL;
}
+static inline const struct tegra_dc_state *
+to_const_dc_state(const struct drm_crtc_state *state)
+{
+ return to_dc_state((struct drm_crtc_state *)state);
+}
+
struct tegra_dc_stats {
unsigned long frames;
unsigned long vblank;
unsigned long underflow;
unsigned long overflow;
+
+ unsigned long frames_total;
+ unsigned long vblank_total;
+ unsigned long underflow_total;
+ unsigned long overflow_total;
};
struct tegra_windowgroup_soc {
@@ -66,7 +79,9 @@ struct tegra_dc_soc_info {
unsigned int num_overlay_formats;
const u64 *modifiers;
bool has_win_a_without_filters;
+ bool has_win_b_vfilter_mem_client;
bool has_win_c_without_vert_filter;
+ bool plane_tiled_memory_bandwidth_x2;
};
struct tegra_dc {
@@ -152,6 +167,8 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
unsigned int div);
+void tegra_crtc_atomic_post_commit(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
/* from rgb.c */
int tegra_dc_rgb_probe(struct tegra_dc *dc);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8c6069b33160..8d37d6b00562 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -21,24 +21,21 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
+#include "dc.h"
#include "drm.h"
#include "gem.h"
+#include "uapi.h"
#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
+#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383
-struct tegra_drm_file {
- struct idr contexts;
- struct mutex lock;
-};
-
static int tegra_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state)
{
@@ -60,6 +57,17 @@ static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
+static void tegra_atomic_post_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc_state *old_crtc_state __maybe_unused;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ tegra_crtc_atomic_post_commit(crtc, old_state);
+}
+
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *drm = old_state->dev;
@@ -79,6 +87,8 @@ static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
} else {
drm_atomic_helper_commit_tail_rpm(old_state);
}
+
+ tegra_atomic_post_commit(drm, old_state);
}
static const struct drm_mode_config_helper_funcs
@@ -94,7 +104,9 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
if (!fpriv)
return -ENOMEM;
- idr_init_base(&fpriv->contexts, 1);
+ idr_init_base(&fpriv->legacy_contexts, 1);
+ xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
+ xa_init(&fpriv->syncpoints);
mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
@@ -107,20 +119,6 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
kfree(context);
}
-static struct host1x_bo *
-host1x_bo_lookup(struct drm_file *file, u32 handle)
-{
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
-
- gem = drm_gem_object_lookup(file, handle);
- if (!gem)
- return NULL;
-
- bo = to_tegra_bo(gem);
- return &bo->base;
-}
-
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
struct drm_tegra_reloc __user *src,
struct drm_device *drm,
@@ -151,11 +149,11 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
- dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
+ dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
- dest->target.bo = host1x_bo_lookup(file, target);
+ dest->target.bo = tegra_gem_lookup(file, target);
if (!dest->target.bo)
return -ENOENT;
@@ -193,7 +191,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
return -EINVAL;
job = host1x_job_alloc(context->channel, args->num_cmdbufs,
- args->num_relocs);
+ args->num_relocs, false);
if (!job)
return -ENOMEM;
@@ -201,6 +199,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
job->client = client;
job->class = client->class;
job->serialize = true;
+ job->syncpt_recovery = true;
/*
* Track referenced BOs so that they can be unreferenced after the
@@ -237,7 +236,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
}
- bo = host1x_bo_lookup(file, cmdbuf.handle);
+ bo = tegra_gem_lookup(file, cmdbuf.handle);
if (!bo) {
err = -ENOENT;
goto fail;
@@ -432,7 +431,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
if (err < 0)
return err;
- err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
+ err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
if (err < 0) {
client->ops->close_channel(context);
return err;
@@ -487,13 +486,13 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = idr_find(&fpriv->contexts, args->context);
+ context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -EINVAL;
goto unlock;
}
- idr_remove(&fpriv->contexts, context->id);
+ idr_remove(&fpriv->legacy_contexts, context->id);
tegra_drm_context_free(context);
unlock:
@@ -512,7 +511,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = idr_find(&fpriv->contexts, args->context);
+ context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
@@ -541,7 +540,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = idr_find(&fpriv->contexts, args->context);
+ context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
@@ -566,7 +565,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = idr_find(&fpriv->contexts, args->context);
+ context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
@@ -735,10 +734,25 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
+ DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
@@ -792,10 +806,11 @@ static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
struct tegra_drm_file *fpriv = file->driver_priv;
mutex_lock(&fpriv->lock);
- idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
+ idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
+ tegra_drm_uapi_close_file(fpriv);
mutex_unlock(&fpriv->lock);
- idr_destroy(&fpriv->contexts);
+ idr_destroy(&fpriv->legacy_contexts);
mutex_destroy(&fpriv->lock);
kfree(fpriv);
}
@@ -853,7 +868,7 @@ static void tegra_debugfs_init(struct drm_minor *minor)
static const struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
- DRIVER_ATOMIC | DRIVER_RENDER,
+ DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
.lastclose = drm_fb_helper_lastclose,
@@ -883,6 +898,14 @@ static const struct drm_driver tegra_drm_driver = {
int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client)
{
+ /*
+ * When MLOCKs are implemented, change to allocate a shared channel
+ * only when MLOCKs are disabled.
+ */
+ client->shared_channel = host1x_channel_request(&client->base);
+ if (!client->shared_channel)
+ return -EBUSY;
+
mutex_lock(&tegra->clients_lock);
list_add_tail(&client->list, &tegra->clients);
client->drm = tegra;
@@ -899,6 +922,9 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
client->drm = NULL;
mutex_unlock(&tegra->clients_lock);
+ if (client->shared_channel)
+ host1x_channel_put(client->shared_channel);
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 0cb868065348..8b28327c931c 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -64,12 +64,22 @@ struct tegra_drm {
struct tegra_display_hub *hub;
};
+static inline struct host1x *tegra_drm_to_host1x(struct tegra_drm *tegra)
+{
+ return dev_get_drvdata(tegra->drm->dev->parent);
+}
+
struct tegra_drm_client;
struct tegra_drm_context {
struct tegra_drm_client *client;
struct host1x_channel *channel;
+
+ /* Only used by legacy UAPI. */
unsigned int id;
+
+ /* Only used by new UAPI. */
+ struct xarray mappings;
};
struct tegra_drm_client_ops {
@@ -91,7 +101,9 @@ struct tegra_drm_client {
struct host1x_client base;
struct list_head list;
struct tegra_drm *drm;
+ struct host1x_channel *shared_channel;
+ /* Set by driver */
unsigned int version;
const struct tegra_drm_client_ops *ops;
};
diff --git a/drivers/gpu/drm/tegra/firewall.c b/drivers/gpu/drm/tegra/firewall.c
new file mode 100644
index 000000000000..1824d2db0e2c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/firewall.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010-2020 NVIDIA Corporation */
+
+#include "drm.h"
+#include "submit.h"
+#include "uapi.h"
+
+struct tegra_drm_firewall {
+ struct tegra_drm_submit_data *submit;
+ struct tegra_drm_client *client;
+ u32 *data;
+ u32 pos;
+ u32 end;
+ u32 class;
+};
+
+static int fw_next(struct tegra_drm_firewall *fw, u32 *word)
+{
+ if (fw->pos == fw->end)
+ return -EINVAL;
+
+ *word = fw->data[fw->pos++];
+
+ return 0;
+}
+
+static bool fw_check_addr_valid(struct tegra_drm_firewall *fw, u32 offset)
+{
+ u32 i;
+
+ for (i = 0; i < fw->submit->num_used_mappings; i++) {
+ struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;
+
+ if (offset >= m->iova && offset <= m->iova_end)
+ return true;
+ }
+
+ return false;
+}
+
+static int fw_check_reg(struct tegra_drm_firewall *fw, u32 offset)
+{
+ bool is_addr;
+ u32 word;
+ int err;
+
+ err = fw_next(fw, &word);
+ if (err)
+ return err;
+
+ if (!fw->client->ops->is_addr_reg)
+ return 0;
+
+ is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+ offset);
+
+ if (!is_addr)
+ return 0;
+
+ if (!fw_check_addr_valid(fw, word))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fw_check_regs_seq(struct tegra_drm_firewall *fw, u32 offset,
+ u32 count, bool incr)
+{
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ if (fw_check_reg(fw, offset))
+ return -EINVAL;
+
+ if (incr)
+ offset++;
+ }
+
+ return 0;
+}
+
+static int fw_check_regs_mask(struct tegra_drm_firewall *fw, u32 offset,
+ u16 mask)
+{
+ unsigned long bmask = mask;
+ unsigned int bit;
+
+ for_each_set_bit(bit, &bmask, 16) {
+ if (fw_check_reg(fw, offset+bit))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fw_check_regs_imm(struct tegra_drm_firewall *fw, u32 offset)
+{
+ bool is_addr;
+
+ is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+ offset);
+ if (is_addr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fw_check_class(struct tegra_drm_firewall *fw, u32 class)
+{
+ if (!fw->client->ops->is_valid_class) {
+ if (class == fw->client->base.class)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (!fw->client->ops->is_valid_class(class))
+ return -EINVAL;
+
+ return 0;
+}
+
+enum {
+ HOST1X_OPCODE_SETCLASS = 0x00,
+ HOST1X_OPCODE_INCR = 0x01,
+ HOST1X_OPCODE_NONINCR = 0x02,
+ HOST1X_OPCODE_MASK = 0x03,
+ HOST1X_OPCODE_IMM = 0x04,
+ HOST1X_OPCODE_RESTART = 0x05,
+ HOST1X_OPCODE_GATHER = 0x06,
+ HOST1X_OPCODE_SETSTRMID = 0x07,
+ HOST1X_OPCODE_SETAPPID = 0x08,
+ HOST1X_OPCODE_SETPYLD = 0x09,
+ HOST1X_OPCODE_INCR_W = 0x0a,
+ HOST1X_OPCODE_NONINCR_W = 0x0b,
+ HOST1X_OPCODE_GATHER_W = 0x0c,
+ HOST1X_OPCODE_RESTART_W = 0x0d,
+ HOST1X_OPCODE_EXTEND = 0x0e,
+};
+
+int tegra_drm_fw_validate(struct tegra_drm_client *client, u32 *data, u32 start,
+ u32 words, struct tegra_drm_submit_data *submit,
+ u32 *job_class)
+{
+ struct tegra_drm_firewall fw = {
+ .submit = submit,
+ .client = client,
+ .data = data,
+ .pos = start,
+ .end = start+words,
+ .class = *job_class,
+ };
+ bool payload_valid = false;
+ u32 payload;
+ int err;
+
+ while (fw.pos != fw.end) {
+ u32 word, opcode, offset, count, mask, class;
+
+ err = fw_next(&fw, &word);
+ if (err)
+ return err;
+
+ opcode = (word & 0xf0000000) >> 28;
+
+ switch (opcode) {
+ case HOST1X_OPCODE_SETCLASS:
+ offset = (word >> 16) & 0xfff;
+ mask = word & 0x3f;
+ class = (word >> 6) & 0x3ff;
+ err = fw_check_class(&fw, class);
+ fw.class = class;
+ *job_class = class;
+ if (!err)
+ err = fw_check_regs_mask(&fw, offset, mask);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal SETCLASS(offset=0x%x, mask=0x%x, class=0x%x) at word %u",
+ offset, mask, class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_INCR:
+ offset = (word >> 16) & 0xfff;
+ count = word & 0xffff;
+ err = fw_check_regs_seq(&fw, offset, count, true);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal INCR(offset=0x%x, count=%u) in class 0x%x at word %u",
+ offset, count, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_NONINCR:
+ offset = (word >> 16) & 0xfff;
+ count = word & 0xffff;
+ err = fw_check_regs_seq(&fw, offset, count, false);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal NONINCR(offset=0x%x, count=%u) in class 0x%x at word %u",
+ offset, count, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_MASK:
+ offset = (word >> 16) & 0xfff;
+ mask = word & 0xffff;
+ err = fw_check_regs_mask(&fw, offset, mask);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal MASK(offset=0x%x, mask=0x%x) in class 0x%x at word %u",
+ offset, mask, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_IMM:
+ /* IMM cannot reasonably be used to write a pointer */
+ offset = (word >> 16) & 0xfff;
+ err = fw_check_regs_imm(&fw, offset);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal IMM(offset=0x%x) in class 0x%x at word %u",
+ offset, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_SETPYLD:
+ payload = word & 0xffff;
+ payload_valid = true;
+ break;
+ case HOST1X_OPCODE_INCR_W:
+ if (!payload_valid)
+ return -EINVAL;
+
+ offset = word & 0x3fffff;
+ err = fw_check_regs_seq(&fw, offset, payload, true);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal INCR_W(offset=0x%x) in class 0x%x at word %u",
+ offset, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_NONINCR_W:
+ if (!payload_valid)
+ return -EINVAL;
+
+ offset = word & 0x3fffff;
+ err = fw_check_regs_seq(&fw, offset, payload, false);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal NONINCR(offset=0x%x) in class 0x%x at word %u",
+ offset, fw.class, fw.pos-1);
+ break;
+ default:
+ dev_warn(client->base.dev, "illegal opcode at word %u",
+ fw.pos-1);
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
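The firewall walks the raw command stream one 32-bit word at a time: the opcode sits in the top four bits and the operands are packed below it, per the masks in the switch above. A standalone decoder for the two most common opcodes (hypothetical word values; plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

static void decode(uint32_t word)
{
	switch (word >> 28) {
	case 0x0:	/* HOST1X_OPCODE_SETCLASS */
		printf("SETCLASS offset=0x%x class=0x%x mask=0x%x\n",
		       (word >> 16) & 0xfff, (word >> 6) & 0x3ff,
		       word & 0x3f);
		break;
	case 0x1:	/* HOST1X_OPCODE_INCR */
		printf("INCR offset=0x%x count=%u\n",
		       (word >> 16) & 0xfff, word & 0xffff);
		break;
	default:
		printf("opcode 0x%x\n", word >> 28);
	}
}

int main(void)
{
	decode(0x00001800);	/* hypothetical: SETCLASS class=0x60 */
	decode(0x10020004);	/* hypothetical: INCR offset=0x2, count=4 */
	return 0;
}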
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 26af8daa9a16..6ec598f5d5b3 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -707,3 +707,16 @@ struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
return &bo->gem;
}
+
+struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(file, handle);
+ if (!gem)
+ return NULL;
+
+ bo = to_tegra_bo(gem);
+ return &bo->base;
+}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index c15fd99d6cb2..cb5146a67668 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -80,4 +80,6 @@ struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
struct dma_buf *buf);
+struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle);
+
#endif
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 2e65b4075ce6..e00ec3f40ec8 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -4,6 +4,7 @@
*/
#include <linux/iommu.h>
+#include <linux/interconnect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -64,6 +65,9 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
copy->reflect_x = state->reflect_x;
copy->reflect_y = state->reflect_y;
copy->opaque = state->opaque;
+ copy->total_peak_memory_bandwidth = state->total_peak_memory_bandwidth;
+ copy->peak_memory_bandwidth = state->peak_memory_bandwidth;
+ copy->avg_memory_bandwidth = state->avg_memory_bandwidth;
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
@@ -244,6 +248,78 @@ void tegra_plane_cleanup_fb(struct drm_plane *plane,
tegra_dc_unpin(dc, to_tegra_plane_state(state));
}
+static int tegra_plane_calculate_memory_bandwidth(struct drm_plane_state *state)
+{
+ struct tegra_plane_state *tegra_state = to_tegra_plane_state(state);
+ unsigned int i, bpp, dst_w, dst_h, src_w, src_h, mul;
+ const struct tegra_dc_soc_info *soc;
+ const struct drm_format_info *fmt;
+ struct drm_crtc_state *crtc_state;
+ u64 avg_bandwidth, peak_bandwidth;
+
+ if (!state->visible)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ if (!crtc_state)
+ return -EINVAL;
+
+ src_w = drm_rect_width(&state->src) >> 16;
+ src_h = drm_rect_height(&state->src) >> 16;
+ dst_w = drm_rect_width(&state->dst);
+ dst_h = drm_rect_height(&state->dst);
+
+ fmt = state->fb->format;
+ soc = to_tegra_dc(state->crtc)->soc;
+
+ /*
+ * Note that the real memory bandwidth varies depending on the format
+ * and memory layout; we are not taking that into account because a
+ * small estimation error isn't important since the bandwidth is
+ * rounded up anyway.
+ */
+ for (i = 0, bpp = 0; i < fmt->num_planes; i++) {
+ unsigned int bpp_plane = fmt->cpp[i] * 8;
+
+ /*
+ * Sub-sampling is relevant for chroma planes only and vertical
+ * readouts are not cached, hence only horizontal sub-sampling
+ * matters.
+ */
+ if (i > 0)
+ bpp_plane /= fmt->hsub;
+
+ bpp += bpp_plane;
+ }
+
+ /* average bandwidth in kbytes/sec */
+ avg_bandwidth = min(src_w, dst_w) * min(src_h, dst_h);
+ avg_bandwidth *= drm_mode_vrefresh(&crtc_state->adjusted_mode);
+ avg_bandwidth = DIV_ROUND_UP(avg_bandwidth * bpp, 8) + 999;
+ do_div(avg_bandwidth, 1000);
+
+ /* mode.clock in kHz, peak bandwidth in kbytes/sec */
+ peak_bandwidth = DIV_ROUND_UP(crtc_state->adjusted_mode.clock * bpp, 8);
+
+ /*
+ * The Tegra30/114 Memory Controller can't interleave DC memory requests
+ * for the tiled windows because the DC uses a 16-byte atom, while DDR3
+ * uses a 32-byte atom. Hence there is a 2x memory overfetch for tiled
+ * framebuffers with DDR3 on these SoCs.
+ */
+ if (soc->plane_tiled_memory_bandwidth_x2 &&
+ tegra_state->tiling.mode == TEGRA_BO_TILING_MODE_TILED)
+ mul = 2;
+ else
+ mul = 1;
+
+ /* ICC bandwidth in kbytes/sec */
+ tegra_state->peak_memory_bandwidth = kBps_to_icc(peak_bandwidth) * mul;
+ tegra_state->avg_memory_bandwidth = kBps_to_icc(avg_bandwidth) * mul;
+
+ return 0;
+}
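As a worked example of the estimate above, take a hypothetical 1920x1080 ARGB8888 plane on a 148.5 MHz, 60 Hz mode: the average comes out to 497,664 kB/s and the peak to 594,000 kB/s, before the kBps_to_icc() conversion and the optional x2 tiling multiplier:

#include <stdio.h>

int main(void)
{
	unsigned long long w = 1920, h = 1080, refresh = 60;
	unsigned long long bpp = 32;		/* ARGB8888 */
	unsigned long long clock_khz = 148500;	/* adjusted_mode.clock */

	/* average: visible pixels x refresh x bytes/pixel, rounded to kB/s */
	unsigned long long avg = (w * h * refresh * bpp / 8 + 999) / 1000;

	/* peak: pixel clock (kHz) x bytes/pixel, already in kB/s */
	unsigned long long peak = (clock_khz * bpp + 7) / 8;

	printf("avg=%llu kB/s, peak=%llu kB/s\n", avg, peak);
	return 0;
}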
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
@@ -262,6 +338,10 @@ int tegra_plane_state_add(struct tegra_plane *plane,
if (err < 0)
return err;
+ err = tegra_plane_calculate_memory_bandwidth(state);
+ if (err < 0)
+ return err;
+
tegra = to_dc_state(crtc_state);
tegra->planes |= WIN_A_ACT_REQ << plane->index;
@@ -646,3 +726,40 @@ int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
return 0;
}
+
+static const char * const tegra_plane_icc_names[TEGRA_DC_LEGACY_PLANES_NUM] = {
+ "wina", "winb", "winc", NULL, NULL, NULL, "cursor",
+};
+
+int tegra_plane_interconnect_init(struct tegra_plane *plane)
+{
+ const char *icc_name = tegra_plane_icc_names[plane->index];
+ struct device *dev = plane->dc->dev;
+ struct tegra_dc *dc = plane->dc;
+ int err;
+
+ if (WARN_ON(plane->index >= TEGRA_DC_LEGACY_PLANES_NUM) ||
+ WARN_ON(!tegra_plane_icc_names[plane->index]))
+ return -EINVAL;
+
+ plane->icc_mem = devm_of_icc_get(dev, icc_name);
+ err = PTR_ERR_OR_ZERO(plane->icc_mem);
+ if (err) {
+ dev_err_probe(dev, err, "failed to get %s interconnect\n",
+ icc_name);
+ return err;
+ }
+
+ /* plane B on T20/30 has a dedicated memory client for a 6-tap vertical filter */
+ if (plane->index == 1 && dc->soc->has_win_b_vfilter_mem_client) {
+ plane->icc_mem_vfilter = devm_of_icc_get(dev, "winb-vfilter");
+ err = PTR_ERR_OR_ZERO(plane->icc_mem_vfilter);
+ if (err) {
+ dev_err_probe(dev, err, "failed to get %s interconnect\n",
+ "winb-vfilter");
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 1785c1559c0c..d9470780c803 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -8,6 +8,7 @@
#include <drm/drm_plane.h>
+struct icc_path;
struct tegra_bo;
struct tegra_dc;
@@ -16,6 +17,9 @@ struct tegra_plane {
struct tegra_dc *dc;
unsigned int offset;
unsigned int index;
+
+ struct icc_path *icc_mem;
+ struct icc_path *icc_mem_vfilter;
};
struct tegra_cursor {
@@ -52,6 +56,11 @@ struct tegra_plane_state {
/* used for legacy blending support only */
struct tegra_plane_legacy_blending_state blending[2];
bool opaque;
+
+ /* bandwidths are in ICC units, i.e. kbytes/sec */
+ u32 total_peak_memory_bandwidth;
+ u32 peak_memory_bandwidth;
+ u32 avg_memory_bandwidth;
};
static inline struct tegra_plane_state *
@@ -63,6 +72,12 @@ to_tegra_plane_state(struct drm_plane_state *state)
return NULL;
}
+static inline const struct tegra_plane_state *
+to_const_tegra_plane_state(const struct drm_plane_state *state)
+{
+ return to_tegra_plane_state((struct drm_plane_state *)state);
+}
+
extern const struct drm_plane_funcs tegra_plane_funcs;
int tegra_plane_prepare_fb(struct drm_plane *plane,
@@ -78,5 +93,6 @@ bool tegra_plane_format_is_indexed(unsigned int format);
bool tegra_plane_format_is_yuv(unsigned int format, bool *planar, unsigned int *bpc);
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
struct tegra_plane_state *state);
+int tegra_plane_interconnect_init(struct tegra_plane *plane);
#endif /* TEGRA_PLANE_H */
diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c
new file mode 100644
index 000000000000..776f825df52f
--- /dev/null
+++ b/drivers/gpu/drm/tegra/submit.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#include <linux/dma-fence-array.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/nospec.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "submit.h"
+#include "uapi.h"
+
+#define SUBMIT_ERR(context, fmt, ...) \
+ dev_err_ratelimited(context->client->base.dev, \
+ "%s: job submission failed: " fmt "\n", \
+ current->comm, ##__VA_ARGS__)
+
+struct gather_bo {
+ struct host1x_bo base;
+
+ struct kref ref;
+
+ struct device *dev;
+ u32 *gather_data;
+ dma_addr_t gather_data_dma;
+ size_t gather_data_words;
+};
+
+static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
+{
+ struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+ kref_get(&bo->ref);
+
+ return host_bo;
+}
+
+static void gather_bo_release(struct kref *ref)
+{
+ struct gather_bo *bo = container_of(ref, struct gather_bo, ref);
+
+ dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
+ 0);
+ kfree(bo);
+}
+
+static void gather_bo_put(struct host1x_bo *host_bo)
+{
+ struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+ kref_put(&bo->ref, gather_bo_release);
+}
+
+static struct sg_table *
+gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys)
+{
+ struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+ struct sg_table *sgt;
+ int err;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma,
+ bo->gather_data_words * 4);
+ if (err) {
+ kfree(sgt);
+ return ERR_PTR(err);
+ }
+
+ return sgt;
+}
+
+static void gather_bo_unpin(struct device *dev, struct sg_table *sgt)
+{
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
+static void *gather_bo_mmap(struct host1x_bo *host_bo)
+{
+ struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+ return bo->gather_data;
+}
+
+static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
+{
+}
+
+const struct host1x_bo_ops gather_bo_ops = {
+ .get = gather_bo_get,
+ .put = gather_bo_put,
+ .pin = gather_bo_pin,
+ .unpin = gather_bo_unpin,
+ .mmap = gather_bo_mmap,
+ .munmap = gather_bo_munmap,
+};
+
+static struct tegra_drm_mapping *
+tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
+{
+ struct tegra_drm_mapping *mapping;
+
+ xa_lock(&context->mappings);
+
+ mapping = xa_load(&context->mappings, id);
+ if (mapping)
+ kref_get(&mapping->ref);
+
+ xa_unlock(&context->mappings);
+
+ return mapping;
+}
+
+static void *alloc_copy_user_array(void __user *from, size_t count, size_t size)
+{
+ size_t copy_len;
+ void *data;
+
+ if (check_mul_overflow(count, size, &copy_len))
+ return ERR_PTR(-EINVAL);
+
+ /* cap the copied array at an arbitrary 16 KiB */
+ if (copy_len > 0x4000)
+ return ERR_PTR(-E2BIG);
+
+ data = kvmalloc(copy_len, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(data, from, copy_len)) {
+ kvfree(data);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return data;
+}
+
+static int submit_copy_gather_data(struct gather_bo **pbo, struct device *dev,
+ struct tegra_drm_context *context,
+ struct drm_tegra_channel_submit *args)
+{
+ struct gather_bo *bo;
+ size_t copy_len;
+
+ if (args->gather_data_words == 0) {
+ SUBMIT_ERR(context, "gather_data_words cannot be zero");
+ return -EINVAL;
+ }
+
+ if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, &copy_len)) {
+ SUBMIT_ERR(context, "gather_data_words is too large");
+ return -EINVAL;
+ }
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo) {
+ SUBMIT_ERR(context, "failed to allocate memory for bo info");
+ return -ENOMEM;
+ }
+
+ host1x_bo_init(&bo->base, &gather_bo_ops);
+ kref_init(&bo->ref);
+ bo->dev = dev;
+
+ bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
+ GFP_KERNEL | __GFP_NOWARN, 0);
+ if (!bo->gather_data) {
+ SUBMIT_ERR(context, "failed to allocate memory for gather data");
+ kfree(bo);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
+ SUBMIT_ERR(context, "failed to copy gather data from userspace");
+ dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
+ kfree(bo);
+ return -EFAULT;
+ }
+
+ bo->gather_data_words = args->gather_data_words;
+
+ *pbo = bo;
+
+ return 0;
+}
+
+static int submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo,
+ struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
+{
+ /* TODO check that target_offset is within bounds */
+ dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
+ u32 written_ptr;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
+ iova |= BIT_ULL(39);
+#endif
+
+ written_ptr = iova >> buf->reloc.shift;
+
+ if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
+ SUBMIT_ERR(context,
+ "relocation has too large gather offset (%u vs gather length %zu)",
+ buf->reloc.gather_offset_words, bo->gather_data_words);
+ return -EINVAL;
+ }
+
+ buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
+ bo->gather_data_words);
+
+ bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;
+
+ return 0;
+}
+
+static int submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo,
+ struct drm_tegra_channel_submit *args,
+ struct tegra_drm_submit_data *job_data)
+{
+ struct tegra_drm_used_mapping *mappings;
+ struct drm_tegra_submit_buf *bufs;
+ int err;
+ u32 i;
+
+ bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
+ sizeof(*bufs));
+ if (IS_ERR(bufs)) {
+ SUBMIT_ERR(context, "failed to copy bufs array from userspace");
+ return PTR_ERR(bufs);
+ }
+
+ mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
+ if (!mappings) {
+ SUBMIT_ERR(context, "failed to allocate memory for mapping info");
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < args->num_bufs; i++) {
+ struct drm_tegra_submit_buf *buf = &bufs[i];
+ struct tegra_drm_mapping *mapping;
+
+ if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
+ SUBMIT_ERR(context, "invalid flag specified for buffer");
+ err = -EINVAL;
+ goto drop_refs;
+ }
+
+ mapping = tegra_drm_mapping_get(context, buf->mapping);
+ if (!mapping) {
+ SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
+ err = -EINVAL;
+ goto drop_refs;
+ }
+
+ err = submit_write_reloc(context, bo, buf, mapping);
+ if (err) {
+ tegra_drm_mapping_put(mapping);
+ goto drop_refs;
+ }
+
+ mappings[i].mapping = mapping;
+ mappings[i].flags = buf->flags;
+ }
+
+ job_data->used_mappings = mappings;
+ job_data->num_used_mappings = i;
+
+ err = 0;
+
+ goto done;
+
+drop_refs:
+ while (i--)
+ tegra_drm_mapping_put(mappings[i].mapping);
+
+ kfree(mappings);
+ job_data->used_mappings = NULL;
+
+done:
+ kvfree(bufs);
+
+ return err;
+}
+
+static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
+ struct xarray *syncpoints, struct drm_tegra_channel_submit *args)
+{
+ struct host1x_syncpt *sp;
+
+ if (args->syncpt.flags) {
+ SUBMIT_ERR(context, "invalid flag specified for syncpt");
+ return -EINVAL;
+ }
+
+ /* Syncpt ref will be dropped on job release */
+ sp = xa_load(syncpoints, args->syncpt.id);
+ if (!sp) {
+ SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated");
+ return -EINVAL;
+ }
+
+ job->syncpt = host1x_syncpt_get(sp);
+ job->syncpt_incrs = args->syncpt.increments;
+
+ return 0;
+}
+
+static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
+ struct drm_tegra_submit_cmd_gather_uptr *cmd,
+ struct gather_bo *bo, u32 *offset,
+ struct tegra_drm_submit_data *job_data,
+ u32 *class)
+{
+ u32 next_offset;
+
+ if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
+ SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
+ return -EINVAL;
+ }
+
+ /* Check for maximum gather size */
+ if (cmd->words > 16383) {
+ SUBMIT_ERR(context, "too many words in GATHER_UPTR command");
+ return -EINVAL;
+ }
+
+ if (check_add_overflow(*offset, cmd->words, &next_offset)) {
+ SUBMIT_ERR(context, "too many total words in job");
+ return -EINVAL;
+ }
+
+ if (next_offset > bo->gather_data_words) {
+ SUBMIT_ERR(context, "GATHER_UPTR command overflows gather data");
+ return -EINVAL;
+ }
+
+ if (tegra_drm_fw_validate(context->client, bo->gather_data, *offset,
+ cmd->words, job_data, class)) {
+ SUBMIT_ERR(context, "job was rejected by firewall");
+ return -EINVAL;
+ }
+
+ host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
+
+ *offset = next_offset;
+
+ return 0;
+}
+
+static struct host1x_job *
+submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo,
+ struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data,
+ struct xarray *syncpoints)
+{
+ struct drm_tegra_submit_cmd *cmds;
+ u32 i, gather_offset = 0, class;
+ struct host1x_job *job;
+ int err;
+
+ /* Set initial class for firewall. */
+ class = context->client->base.class;
+
+ cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
+ sizeof(*cmds));
+ if (IS_ERR(cmds)) {
+ SUBMIT_ERR(context, "failed to copy cmds array from userspace");
+ return ERR_CAST(cmds);
+ }
+
+ job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
+ if (!job) {
+ SUBMIT_ERR(context, "failed to allocate memory for job");
+ job = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ err = submit_get_syncpt(context, job, syncpoints, args);
+ if (err < 0)
+ goto free_job;
+
+ job->client = &context->client->base;
+ job->class = context->client->base.class;
+ job->serialize = true;
+
+ for (i = 0; i < args->num_cmds; i++) {
+ struct drm_tegra_submit_cmd *cmd = &cmds[i];
+
+ if (cmd->flags) {
+ SUBMIT_ERR(context, "unknown flags given for cmd");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
+ err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
+ &gather_offset, job_data, &class);
+ if (err)
+ goto free_job;
+ } else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
+ if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
+ SUBMIT_ERR(context, "non-zero reserved value");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
+ false, class);
+ } else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
+ if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
+ SUBMIT_ERR(context, "non-zero reserved value");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ if (cmd->wait_syncpt.id != args->syncpt.id) {
+ SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
+ true, class);
+ } else {
+ SUBMIT_ERR(context, "unknown cmd type");
+ err = -EINVAL;
+ goto free_job;
+ }
+ }
+
+ if (gather_offset == 0) {
+ SUBMIT_ERR(context, "job must have at least one gather");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ goto done;
+
+free_job:
+ host1x_job_put(job);
+ job = ERR_PTR(err);
+
+done:
+ kvfree(cmds);
+
+ return job;
+}
+
+static void release_job(struct host1x_job *job)
+{
+ struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
+ struct tegra_drm_submit_data *job_data = job->user_data;
+ u32 i;
+
+ for (i = 0; i < job_data->num_used_mappings; i++)
+ tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
+
+ kfree(job_data->used_mappings);
+ kfree(job_data);
+
+ if (pm_runtime_enabled(client->base.dev))
+ pm_runtime_put_autosuspend(client->base.dev);
+}
+
+int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_submit *args = data;
+ struct tegra_drm_submit_data *job_data;
+ struct drm_syncobj *syncobj = NULL;
+ struct tegra_drm_context *context;
+ struct host1x_job *job;
+ struct gather_bo *bo;
+ u32 i;
+ int err;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+ pr_err_ratelimited("%s: %s: invalid channel context '%#x'\n", __func__,
+ current->comm, args->context);
+ return -EINVAL;
+ }
+
+ if (args->syncobj_in) {
+ struct dma_fence *fence;
+
+ err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
+ if (err) {
+ SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
+ goto unlock;
+ }
+
+ err = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(10000));
+ dma_fence_put(fence);
+ if (err <= 0) {
+ if (!err)
+ err = -ETIMEDOUT;
+ SUBMIT_ERR(context, "wait for syncobj_in failed or timed out");
+ goto unlock;
+ }
+ }
+
+ if (args->syncobj_out) {
+ syncobj = drm_syncobj_find(file, args->syncobj_out);
+ if (!syncobj) {
+ SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
+ err = -ENOENT;
+ goto unlock;
+ }
+ }
+
+ /* Allocate gather BO and copy gather words in. */
+ err = submit_copy_gather_data(&bo, drm->dev, context, args);
+ if (err)
+ goto unlock;
+
+ job_data = kzalloc(sizeof(*job_data), GFP_KERNEL);
+ if (!job_data) {
+ SUBMIT_ERR(context, "failed to allocate memory for job data");
+ err = -ENOMEM;
+ goto put_bo;
+ }
+
+ /* Get data buffer mappings and do relocation patching. */
+ err = submit_process_bufs(context, bo, args, job_data);
+ if (err)
+ goto free_job_data;
+
+ /* Allocate host1x_job and add gathers and waits to it. */
+ job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto free_job_data;
+ }
+
+ /* Map gather data for Host1x. */
+ err = host1x_job_pin(job, context->client->base.dev);
+ if (err) {
+ SUBMIT_ERR(context, "failed to pin job: %d", err);
+ goto put_job;
+ }
+
+ /* Boot engine. */
+ if (pm_runtime_enabled(context->client->base.dev)) {
+ err = pm_runtime_resume_and_get(context->client->base.dev);
+ if (err < 0) {
+ SUBMIT_ERR(context, "could not power up engine: %d", err);
+ goto unpin_job;
+ }
+ }
+
+ job->user_data = job_data;
+ job->release = release_job;
+ job->timeout = 10000;
+
+ /*
+ * job_data is now part of job reference counting, so don't release
+ * it from here.
+ */
+ job_data = NULL;
+
+ /* Submit job to hardware. */
+ err = host1x_job_submit(job);
+ if (err) {
+ SUBMIT_ERR(context, "host1x job submission failed: %d", err);
+ goto unpin_job;
+ }
+
+ /* Return postfences to userspace and add fences to DMA reservations. */
+ args->syncpt.value = job->syncpt_end;
+
+ if (syncobj) {
+ struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end);
+
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ SUBMIT_ERR(context, "failed to create postfence: %d", err);
+ goto put_job;
+ }
+
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+
+ goto put_job;
+
+unpin_job:
+ host1x_job_unpin(job);
+put_job:
+ host1x_job_put(job);
+free_job_data:
+ if (job_data && job_data->used_mappings) {
+ for (i = 0; i < job_data->num_used_mappings; i++)
+ tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
+
+ kfree(job_data->used_mappings);
+ }
+
+ kfree(job_data);
+put_bo:
+ gather_bo_put(&bo->base);
+unlock:
+ if (syncobj)
+ drm_syncobj_put(syncobj);
+
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
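
For context, a userspace submission through tegra_drm_ioctl_channel_submit() above might look roughly like this sketch; struct and ioctl names follow the UAPI header this series adds, and the two gather words are placeholders rather than a command stream that actually increments the syncpoint:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>	/* drm_tegra_channel_submit and friends */

/*
 * Sketch only: 'fd' is the open DRM device, 'ctx' comes from
 * CHANNEL_OPEN and 'syncpt_id' from SYNCPOINT_ALLOCATE.
 */
static int submit_one_gather(int fd, uint32_t ctx, uint32_t syncpt_id)
{
	uint32_t gather[2] = { 0, 0 };	/* placeholder opcodes */
	struct drm_tegra_submit_cmd cmd;
	struct drm_tegra_channel_submit args;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR;
	cmd.gather_uptr.words = 2;

	memset(&args, 0, sizeof(args));
	args.context = ctx;
	args.num_cmds = 1;
	args.cmds_ptr = (uintptr_t)&cmd;
	args.gather_data_words = 2;
	args.gather_data_ptr = (uintptr_t)gather;
	args.syncpt.id = syncpt_id;
	args.syncpt.increments = 1;

	return ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_SUBMIT, &args);
}

On success, args.syncpt.value holds the post-fence threshold the job will reach, matching the job->syncpt_end written back above.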
diff --git a/drivers/gpu/drm/tegra/submit.h b/drivers/gpu/drm/tegra/submit.h
new file mode 100644
index 000000000000..cf6a2f0a29fc
--- /dev/null
+++ b/drivers/gpu/drm/tegra/submit.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#ifndef _TEGRA_DRM_UAPI_SUBMIT_H
+#define _TEGRA_DRM_UAPI_SUBMIT_H
+
+struct tegra_drm_used_mapping {
+ struct tegra_drm_mapping *mapping;
+ u32 flags;
+};
+
+struct tegra_drm_submit_data {
+ struct tegra_drm_used_mapping *used_mappings;
+ u32 num_used_mappings;
+};
+
+int tegra_drm_fw_validate(struct tegra_drm_client *client, u32 *data, u32 start,
+ u32 words, struct tegra_drm_submit_data *submit,
+ u32 *job_class);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
new file mode 100644
index 000000000000..dc16a24f4dbe
--- /dev/null
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_utils.h>
+
+#include "drm.h"
+#include "uapi.h"
+
+static void tegra_drm_mapping_release(struct kref *ref)
+{
+ struct tegra_drm_mapping *mapping =
+ container_of(ref, struct tegra_drm_mapping, ref);
+
+ if (mapping->sgt)
+ dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+ host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+ host1x_bo_put(mapping->bo);
+
+ kfree(mapping);
+}
+
+void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
+{
+ kref_put(&mapping->ref, tegra_drm_mapping_release);
+}
+
+static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
+{
+ struct tegra_drm_mapping *mapping;
+ unsigned long id;
+
+ xa_for_each(&context->mappings, id, mapping)
+ tegra_drm_mapping_put(mapping);
+
+ xa_destroy(&context->mappings);
+
+ host1x_channel_put(context->channel);
+
+ kfree(context);
+}
+
+void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
+{
+ struct tegra_drm_context *context;
+ struct host1x_syncpt *sp;
+ unsigned long id;
+
+ xa_for_each(&file->contexts, id, context)
+ tegra_drm_channel_context_close(context);
+
+ xa_for_each(&file->syncpoints, id, sp)
+ host1x_syncpt_put(sp);
+
+ xa_destroy(&file->contexts);
+ xa_destroy(&file->syncpoints);
+}
+
+static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
+{
+ struct tegra_drm_client *client;
+
+ list_for_each_entry(client, &tegra->clients, list)
+ if (client->base.class == class)
+ return client;
+
+ return NULL;
+}
+
+int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct drm_tegra_channel_open *args = data;
+ struct tegra_drm_client *client = NULL;
+ struct tegra_drm_context *context;
+ int err;
+
+ if (args->flags)
+ return -EINVAL;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ client = tegra_drm_find_client(tegra, args->host1x_class);
+ if (!client) {
+ err = -ENODEV;
+ goto free;
+ }
+
+ if (client->shared_channel) {
+ context->channel = host1x_channel_get(client->shared_channel);
+ } else {
+ context->channel = host1x_channel_request(&client->base);
+ if (!context->channel) {
+ err = -EBUSY;
+ goto free;
+ }
+ }
+
+ err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
+ GFP_KERNEL);
+ if (err < 0)
+ goto put_channel;
+
+ context->client = client;
+ xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);
+
+ args->version = client->version;
+ args->capabilities = 0;
+
+ if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
+ args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;
+
+ return 0;
+
+put_channel:
+ host1x_channel_put(context->channel);
+free:
+ kfree(context);
+
+ return err;
+}
+
+int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_close *args = data;
+ struct tegra_drm_context *context;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+ return -EINVAL;
+ }
+
+ xa_erase(&fpriv->contexts, args->context);
+
+ mutex_unlock(&fpriv->lock);
+
+ tegra_drm_channel_context_close(context);
+
+ return 0;
+}
+
+int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_map *args = data;
+ struct tegra_drm_mapping *mapping;
+ struct tegra_drm_context *context;
+ int err = 0;
+
+ if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
+ return -EINVAL;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+ return -EINVAL;
+ }
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ kref_init(&mapping->ref);
+
+ mapping->dev = context->client->base.dev;
+ mapping->bo = tegra_gem_lookup(file, args->handle);
+ if (!mapping->bo) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ if (context->client->base.group) {
+ /* IOMMU domain managed directly using IOMMU API */
+ host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova);
+ } else {
+ switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
+ case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
+ mapping->direction = DMA_BIDIRECTIONAL;
+ break;
+
+ case DRM_TEGRA_CHANNEL_MAP_WRITE:
+ mapping->direction = DMA_FROM_DEVICE;
+ break;
+
+ case DRM_TEGRA_CHANNEL_MAP_READ:
+ mapping->direction = DMA_TO_DEVICE;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto put_gem;
+ }
+
+ mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL);
+ if (IS_ERR(mapping->sgt)) {
+ err = PTR_ERR(mapping->sgt);
+ goto put_gem;
+ }
+
+ err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (err)
+ goto unpin;
+
+ mapping->iova = sg_dma_address(mapping->sgt->sgl);
+ }
+
+ mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size;
+
+ err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
+ GFP_KERNEL);
+ if (err < 0)
+ goto unmap;
+
+ mutex_unlock(&fpriv->lock);
+
+ return 0;
+
+unmap:
+ if (mapping->sgt) {
+ dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+unpin:
+ host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+put_gem:
+ host1x_bo_put(mapping->bo);
+ kfree(mapping);
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
+
+int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_unmap *args = data;
+ struct tegra_drm_mapping *mapping;
+ struct tegra_drm_context *context;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+ return -EINVAL;
+ }
+
+ mapping = xa_erase(&context->mappings, args->mapping);
+
+ mutex_unlock(&fpriv->lock);
+
+ if (!mapping)
+ return -EINVAL;
+
+ tegra_drm_mapping_put(mapping);
+ return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_syncpoint_allocate *args = data;
+ struct host1x_syncpt *sp;
+ int err;
+
+ if (args->id)
+ return -EINVAL;
+
+ sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
+ if (!sp)
+ return -EBUSY;
+
+ args->id = host1x_syncpt_id(sp);
+
+ err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
+ if (err) {
+ host1x_syncpt_put(sp);
+ return err;
+ }
+
+ return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_syncpoint_allocate *args = data;
+ struct host1x_syncpt *sp;
+
+ mutex_lock(&fpriv->lock);
+ sp = xa_erase(&fpriv->syncpoints, args->id);
+ mutex_unlock(&fpriv->lock);
+
+ if (!sp)
+ return -EINVAL;
+
+ host1x_syncpt_put(sp);
+
+ return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
+ struct drm_tegra_syncpoint_wait *args = data;
+ signed long timeout_jiffies;
+ struct host1x_syncpt *sp;
+
+ if (args->padding != 0)
+ return -EINVAL;
+
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
+}
diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h
new file mode 100644
index 000000000000..12adad770ad3
--- /dev/null
+++ b/drivers/gpu/drm/tegra/uapi.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#ifndef _TEGRA_DRM_UAPI_H
+#define _TEGRA_DRM_UAPI_H
+
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/xarray.h>
+
+#include <drm/drm.h>
+
+struct drm_file;
+struct drm_device;
+
+struct tegra_drm_file {
+ /* Legacy UAPI state */
+ struct idr legacy_contexts;
+ struct mutex lock;
+
+ /* New UAPI state */
+ struct xarray contexts;
+ struct xarray syncpoints;
+};
+
+struct tegra_drm_mapping {
+ struct kref ref;
+
+ struct device *dev;
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+ enum dma_data_direction direction;
+ dma_addr_t iova;
+ dma_addr_t iova_end;
+};
+
+int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data,
+ struct drm_file *file);
+
+void tegra_drm_uapi_close_file(struct tegra_drm_file *file);
+void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index c9d55a9a3180..c02010ff2b7f 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -29,7 +29,6 @@ struct vic_config {
struct vic {
struct falcon falcon;
- bool booted;
void __iomem *regs;
struct tegra_drm_client client;
@@ -52,48 +51,6 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
writel(value, vic->regs + offset);
}
-static int vic_runtime_resume(struct device *dev)
-{
- struct vic *vic = dev_get_drvdata(dev);
- int err;
-
- err = clk_prepare_enable(vic->clk);
- if (err < 0)
- return err;
-
- usleep_range(10, 20);
-
- err = reset_control_deassert(vic->rst);
- if (err < 0)
- goto disable;
-
- usleep_range(10, 20);
-
- return 0;
-
-disable:
- clk_disable_unprepare(vic->clk);
- return err;
-}
-
-static int vic_runtime_suspend(struct device *dev)
-{
- struct vic *vic = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_assert(vic->rst);
- if (err < 0)
- return err;
-
- usleep_range(2000, 4000);
-
- clk_disable_unprepare(vic->clk);
-
- vic->booted = false;
-
- return 0;
-}
-
static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
@@ -103,9 +60,6 @@ static int vic_boot(struct vic *vic)
void *hdr;
int err = 0;
- if (vic->booted)
- return 0;
-
#ifdef CONFIG_IOMMU_API
if (vic->config->supports_sid && spec) {
u32 value;
@@ -168,8 +122,6 @@ static int vic_boot(struct vic *vic)
return err;
}
- vic->booted = true;
-
return 0;
}
@@ -323,35 +275,74 @@ cleanup:
return err;
}
-static int vic_open_channel(struct tegra_drm_client *client,
- struct tegra_drm_context *context)
+
+static int vic_runtime_resume(struct device *dev)
{
- struct vic *vic = to_vic(client);
+ struct vic *vic = dev_get_drvdata(dev);
int err;
- err = pm_runtime_resume_and_get(vic->dev);
+ err = clk_prepare_enable(vic->clk);
if (err < 0)
return err;
+ usleep_range(10, 20);
+
+ err = reset_control_deassert(vic->rst);
+ if (err < 0)
+ goto disable;
+
+ usleep_range(10, 20);
+
err = vic_load_firmware(vic);
if (err < 0)
- goto rpm_put;
+ goto assert;
err = vic_boot(vic);
if (err < 0)
- goto rpm_put;
+ goto assert;
+
+ return 0;
+
+assert:
+ reset_control_assert(vic->rst);
+disable:
+ clk_disable_unprepare(vic->clk);
+ return err;
+}
+
+static int vic_runtime_suspend(struct device *dev)
+{
+ struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(vic->rst);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
+
+ clk_disable_unprepare(vic->clk);
+
+ return 0;
+}
+
+static int vic_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct vic *vic = to_vic(client);
+ int err;
+
+ err = pm_runtime_resume_and_get(vic->dev);
+ if (err < 0)
+ return err;
context->channel = host1x_channel_get(vic->channel);
if (!context->channel) {
- err = -ENOMEM;
- goto rpm_put;
+ pm_runtime_put(vic->dev);
+ return -ENOMEM;
}
return 0;
-
-rpm_put:
- pm_runtime_put(vic->dev);
- return err;
}
static void vic_close_channel(struct tegra_drm_context *context)
@@ -359,7 +350,6 @@ static void vic_close_channel(struct tegra_drm_context *context)
struct vic *vic = to_vic(context->client);
host1x_channel_put(context->channel);
-
pm_runtime_put(vic->dev);
}
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 096017b8789d..d2b6f7de0498 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -9,6 +9,7 @@ host1x-y = \
job.o \
debug.o \
mipi.o \
+ fence.o \
hw/host1x01.o \
hw/host1x02.o \
hw/host1x04.o \
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 6e6ca774f68d..765e5aa64eb6 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -312,10 +312,6 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
bool signal = false;
struct host1x_job *job, *n;
- /* If CDMA is stopped, queue is cleared and we can return */
- if (!cdma->running)
- return;
-
/*
* Walk the sync queue, reading the sync point registers as necessary,
* to consume as many sync queue entries as possible without blocking
@@ -324,7 +320,8 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
struct host1x_syncpt *sp = job->syncpt;
/* Check whether this syncpt has completed, and bail if not */
- if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
+ if (!host1x_syncpt_is_expired(sp, job->syncpt_end) &&
+ !job->cancelled) {
/* Start timer on next pending syncpt */
if (job->timeout)
cdma_start_timer_locked(cdma, job);
@@ -413,8 +410,11 @@ syncpt_incr:
else
restart_addr = cdma->last_pos;
+ if (!job)
+ goto resume;
+
/* do CPU increments for the remaining syncpts */
- if (job) {
+ if (job->syncpt_recovery) {
dev_dbg(dev, "%s: perform CPU incr on pending buffers\n",
__func__);
@@ -433,8 +433,44 @@ syncpt_incr:
dev_dbg(dev, "%s: finished sync_queue modification\n",
__func__);
+ } else {
+ struct host1x_job *failed_job = job;
+
+ host1x_job_dump(dev, job);
+
+ host1x_syncpt_set_locked(job->syncpt);
+ failed_job->cancelled = true;
+
+ list_for_each_entry_continue(job, &cdma->sync_queue, list) {
+ unsigned int i;
+
+ if (job->syncpt != failed_job->syncpt)
+ continue;
+
+ for (i = 0; i < job->num_slots; i++) {
+ unsigned int slot = (job->first_get / 8 + i) %
+ HOST1X_PUSHBUFFER_SLOTS;
+ u32 *mapped = cdma->push_buffer.mapped;
+
+ /*
+ * Overwrite opcodes with zero-word writes to
+ * offset 0xbad. This does nothing but leaves an
+ * easily detected signature in debug traces.
+ */
+ mapped[2 * slot + 0] = 0x1bad0000;
+ mapped[2 * slot + 1] = 0x1bad0000;
+ }
+
+ job->cancelled = true;
+ }
+
+ wmb();
+
+ update_cdma_locked(cdma);
}
+resume:
/* roll back DMAGET and start up channel again */
host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}
@@ -490,6 +526,16 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
mutex_lock(&cdma->lock);
+ /*
+ * Check if syncpoint was locked due to previous job timeout.
+ * This needs to be done within the cdma lock to avoid a race
+ * with the timeout handler.
+ */
+ if (job->syncpt->locked) {
+ mutex_unlock(&cdma->lock);
+ return -EPERM;
+ }
+
if (job->timeout) {
/* init state on first submit with timeout value */
if (!cdma->timeout.initialized) {
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
new file mode 100644
index 000000000000..6941add95d0f
--- /dev/null
+++ b/drivers/gpu/host1x/fence.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Syncpoint dma_fence implementation
+ *
+ * Copyright (c) 2020, NVIDIA Corporation.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include "fence.h"
+#include "intr.h"
+#include "syncpt.h"
+
+static DEFINE_SPINLOCK(lock);
+
+struct host1x_syncpt_fence {
+ struct dma_fence base;
+
+ atomic_t signaling;
+
+ struct host1x_syncpt *sp;
+ u32 threshold;
+
+ struct host1x_waitlist *waiter;
+ void *waiter_ref;
+
+ struct delayed_work timeout_work;
+};
+
+static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
+{
+ return "host1x";
+}
+
+static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
+{
+ return "syncpoint";
+}
+
+static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
+{
+ return container_of(f, struct host1x_syncpt_fence, base);
+}
+
+static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
+{
+ struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+ int err;
+
+ if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
+ return false;
+
+ dma_fence_get(f);
+
+ /*
+ * The dma_fence framework requires the fence driver to keep a
+ * reference to any fences for which 'enable_signaling' has been
+ * called (and that have not been signalled).
+ *
+ * We provide a userspace API to create arbitrary syncpoint fences,
+ * so we cannot normally guarantee that all fences get signalled.
+ * As such, setup a timeout, so that long-lasting fences will get
+ * reaped eventually.
+ */
+ schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
+
+ err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
+ HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
+ sf->waiter, &sf->waiter_ref);
+ if (err) {
+ cancel_delayed_work_sync(&sf->timeout_work);
+ dma_fence_put(f);
+ return false;
+ }
+
+ /* intr framework takes ownership of waiter */
+ sf->waiter = NULL;
+
+ /*
+ * The fence may get signalled at any time after the above call,
+ * so we need to initialize all state used by signalling
+ * before it.
+ */
+
+ return true;
+}
+
+static void host1x_syncpt_fence_release(struct dma_fence *f)
+{
+ struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+
+ kfree(sf->waiter);
+
+ dma_fence_free(f);
+}
+
+const struct dma_fence_ops host1x_syncpt_fence_ops = {
+ .get_driver_name = host1x_syncpt_fence_get_driver_name,
+ .get_timeline_name = host1x_syncpt_fence_get_timeline_name,
+ .enable_signaling = host1x_syncpt_fence_enable_signaling,
+ .release = host1x_syncpt_fence_release,
+};
+
+void host1x_fence_signal(struct host1x_syncpt_fence *f)
+{
+ if (atomic_xchg(&f->signaling, 1))
+ return;
+
+ /*
+ * Cancel pending timeout work - if it races, it will
+ * not get 'f->signaling' and return.
+ */
+ cancel_delayed_work_sync(&f->timeout_work);
+
+ host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);
+
+ dma_fence_signal(&f->base);
+ dma_fence_put(&f->base);
+}
+
+static void do_fence_timeout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct host1x_syncpt_fence *f =
+ container_of(dwork, struct host1x_syncpt_fence, timeout_work);
+
+ if (atomic_xchg(&f->signaling, 1))
+ return;
+
+ /*
+ * Remove the interrupt action - if the signalling path races
+ * with us, it sees 'f->signaling' already set and returns.
+ */
+ host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);
+
+ dma_fence_set_error(&f->base, -ETIMEDOUT);
+ dma_fence_signal(&f->base);
+ dma_fence_put(&f->base);
+}
+
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
+{
+ struct host1x_syncpt_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
+ if (!fence->waiter) {
+ kfree(fence);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fence->sp = sp;
+ fence->threshold = threshold;
+
+ dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
+ dma_fence_context_alloc(1), 0);
+
+ INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);
+
+ return &fence->base;
+}
+EXPORT_SYMBOL(host1x_fence_create);
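
Because host1x_fence_create() above returns an ordinary dma_fence, kernel code can wait on a syncpoint threshold through the standard fence API. A minimal sketch, assuming the caller holds a valid syncpoint reference:

#include <linux/dma-fence.h>
#include <linux/host1x.h>
#include <linux/jiffies.h>

/* Wait up to one second for 'sp' to reach 'threshold'. */
static int wait_syncpt_fence(struct host1x_syncpt *sp, u32 threshold)
{
	struct dma_fence *fence;
	long ret;

	fence = host1x_fence_create(sp, threshold);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* 0 means timeout, negative means error, positive means signalled */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
	dma_fence_put(fence);

	if (ret == 0)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;

	return 0;
}

Note that the 30-second reaper set up in enable_signaling above guarantees such fences eventually signal (with -ETIMEDOUT) even if the syncpoint never reaches the threshold.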
diff --git a/drivers/gpu/host1x/fence.h b/drivers/gpu/host1x/fence.h
new file mode 100644
index 000000000000..70c91de82f14
--- /dev/null
+++ b/drivers/gpu/host1x/fence.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA Corporation.
+ */
+
+#ifndef HOST1X_FENCE_H
+#define HOST1X_FENCE_H
+
+struct host1x_syncpt_fence;
+
+void host1x_fence_signal(struct host1x_syncpt_fence *fence);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index d4c28faf27d1..1999780a7203 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -47,39 +47,84 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
}
}
-static void submit_gathers(struct host1x_job *job)
+static void submit_wait(struct host1x_cdma *cdma, u32 id, u32 threshold,
+ u32 next_class)
+{
+#if HOST1X_HW >= 2
+ host1x_cdma_push_wide(cdma,
+ host1x_opcode_setclass(
+ HOST1X_CLASS_HOST1X,
+ HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32,
+ /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */
+ BIT(0) | BIT(2)
+ ),
+ threshold,
+ id,
+ host1x_opcode_setclass(next_class, 0, 0)
+ );
+#else
+ /* TODO add waitchk or use waitbases or other mitigation */
+ host1x_cdma_push(cdma,
+ host1x_opcode_setclass(
+ HOST1X_CLASS_HOST1X,
+ host1x_uclass_wait_syncpt_r(),
+ BIT(0)
+ ),
+ host1x_class_host_wait_syncpt(id, threshold)
+ );
+ host1x_cdma_push(cdma,
+ host1x_opcode_setclass(next_class, 0, 0),
+ HOST1X_OPCODE_NOP
+ );
+#endif
+}
+
+static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
{
struct host1x_cdma *cdma = &job->channel->cdma;
#if HOST1X_HW < 6
struct device *dev = job->channel->dev;
#endif
unsigned int i;
+ u32 threshold;
- for (i = 0; i < job->num_gathers; i++) {
- struct host1x_job_gather *g = &job->gathers[i];
- dma_addr_t addr = g->base + g->offset;
- u32 op2, op3;
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_cmd *cmd = &job->cmds[i];
- op2 = lower_32_bits(addr);
- op3 = upper_32_bits(addr);
+ if (cmd->is_wait) {
+ if (cmd->wait.relative)
+ threshold = job_syncpt_base + cmd->wait.threshold;
+ else
+ threshold = cmd->wait.threshold;
- trace_write_gather(cdma, g->bo, g->offset, g->words);
+ submit_wait(cdma, cmd->wait.id, threshold, cmd->wait.next_class);
+ } else {
+ struct host1x_job_gather *g = &cmd->gather;
+
+ dma_addr_t addr = g->base + g->offset;
+ u32 op2, op3;
+
+ op2 = lower_32_bits(addr);
+ op3 = upper_32_bits(addr);
- if (op3 != 0) {
+ trace_write_gather(cdma, g->bo, g->offset, g->words);
+
+ if (op3 != 0) {
#if HOST1X_HW >= 6
- u32 op1 = host1x_opcode_gather_wide(g->words);
- u32 op4 = HOST1X_OPCODE_NOP;
+ u32 op1 = host1x_opcode_gather_wide(g->words);
+ u32 op4 = HOST1X_OPCODE_NOP;
- host1x_cdma_push_wide(cdma, op1, op2, op3, op4);
+ host1x_cdma_push_wide(cdma, op1, op2, op3, op4);
#else
- dev_err(dev, "invalid gather for push buffer %pad\n",
- &addr);
- continue;
+ dev_err(dev, "invalid gather for push buffer %pad\n",
+ &addr);
+ continue;
#endif
- } else {
- u32 op1 = host1x_opcode_gather(g->words);
+ } else {
+ u32 op1 = host1x_opcode_gather(g->words);
- host1x_cdma_push(cdma, op1, op2);
+ host1x_cdma_push(cdma, op1, op2);
+ }
}
}
}
@@ -126,7 +171,7 @@ static int channel_submit(struct host1x_job *job)
struct host1x *host = dev_get_drvdata(ch->dev->parent);
trace_host1x_channel_submit(dev_name(ch->dev),
- job->num_gathers, job->num_relocs,
+ job->num_cmds, job->num_relocs,
job->syncpt->id, job->syncpt_incrs);
/* before error checks, return current max */
@@ -181,7 +226,7 @@ static int channel_submit(struct host1x_job *job)
host1x_opcode_setclass(job->class, 0, 0),
HOST1X_OPCODE_NOP);
- submit_gathers(job);
+ submit_gathers(job, syncval - user_syncpt_incrs);
/* end CDMA submit & stash pinned hMems into sync queue */
host1x_cdma_end(&ch->cdma, job);
@@ -191,7 +236,7 @@ static int channel_submit(struct host1x_job *job)
/* schedule a submit complete interrupt */
err = host1x_intr_add_action(host, sp, syncval,
HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
- completed_waiter, NULL);
+ completed_waiter, &job->waiter);
completed_waiter = NULL;
WARN(err, "Failed to set submit complete interrupt");
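
The job_syncpt_base passed into submit_gathers() above is syncval minus the job's own increments, i.e. the syncpoint value at job start, so relative waits resolve against it. A worked case with hypothetical numbers:

	syncpoint value before submit:      100
	job increments (syncpt_incrs):        4    -> syncval = 104
	job_syncpt_base = 104 - 4           = 100

	WAIT_SYNCPT_RELATIVE with threshold 2 becomes an absolute wait for
	100 + 2 = 102, i.e. it releases once the job's own second increment
	has landed.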
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index ceb48229d14b..54e31d81517b 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -156,9 +156,9 @@ static unsigned int show_channel_command(struct output *o, u32 val,
}
}
-static void show_gather(struct output *o, phys_addr_t phys_addr,
+static void show_gather(struct output *o, dma_addr_t phys_addr,
unsigned int words, struct host1x_cdma *cdma,
- phys_addr_t pin_addr, u32 *map_addr)
+ dma_addr_t pin_addr, u32 *map_addr)
{
/* Map dmaget cursor to corresponding mem handle */
u32 offset = phys_addr - pin_addr;
@@ -176,11 +176,11 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
}
for (i = 0; i < words; i++) {
- u32 addr = phys_addr + i * 4;
+ dma_addr_t addr = phys_addr + i * 4;
u32 val = *(map_addr + offset / 4 + i);
if (!data_count) {
- host1x_debug_output(o, "%08x: %08x: ", addr, val);
+ host1x_debug_output(o, " %pad: %08x: ", &addr, val);
data_count = show_channel_command(o, val, &payload);
} else {
host1x_debug_cont(o, "%08x%s", val,
@@ -195,23 +195,25 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
struct push_buffer *pb = &cdma->push_buffer;
struct host1x_job *job;
- host1x_debug_output(o, "PUSHBUF at %pad, %u words\n",
- &pb->dma, pb->size / 4);
-
- show_gather(o, pb->dma, pb->size / 4, cdma, pb->dma, pb->mapped);
-
list_for_each_entry(job, &cdma->sync_queue, list) {
unsigned int i;
- host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
- job, job->syncpt->id, job->syncpt_end,
- job->first_get, job->timeout,
+ host1x_debug_output(o, "JOB, syncpt %u: %u timeout: %u num_slots: %u num_handles: %u\n",
+ job->syncpt->id, job->syncpt_end, job->timeout,
job->num_slots, job->num_unpins);
- for (i = 0; i < job->num_gathers; i++) {
- struct host1x_job_gather *g = &job->gathers[i];
+ show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma,
+ pb->dma + job->first_get, pb->mapped + job->first_get);
+
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_gather *g;
u32 *mapped;
+ if (job->cmds[i].is_wait)
+ continue;
+
+ g = &job->cmds[i].gather;
+
if (job->gather_copy_mapped)
mapped = (u32 *)job->gather_copy_mapped;
else
@@ -222,7 +224,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
continue;
}
- host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n",
+ host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n",
&g->base, g->offset, g->words);
show_gather(o, g->base + g->offset, g->words, cdma,
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x01.c b/drivers/gpu/host1x/hw/debug_hw_1x01.c
index 02a93305ac7b..85242a59fa6a 100644
--- a/drivers/gpu/host1x/hw/debug_hw_1x01.c
+++ b/drivers/gpu/host1x/hw/debug_hw_1x01.c
@@ -16,10 +16,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
struct output *o)
{
struct host1x_cdma *cdma = &ch->cdma;
+ dma_addr_t dmastart, dmaend;
u32 dmaput, dmaget, dmactrl;
u32 cbstat, cbread;
u32 val, base, baseval;
+ dmastart = host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART);
+ dmaend = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND);
dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
@@ -56,9 +59,10 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
cbread);
- host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ host1x_debug_output(o, "DMASTART %pad, DMAEND %pad\n", &dmastart, &dmaend);
+ host1x_debug_output(o, "DMAPUT %08x DMAGET %08x DMACTL %08x\n",
dmaput, dmaget, dmactrl);
- host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+ host1x_debug_output(o, "CBREAD %08x CBSTAT %08x\n", cbread, cbstat);
show_channel_gathers(o, cdma);
host1x_debug_output(o, "\n");
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x06.c b/drivers/gpu/host1x/hw/debug_hw_1x06.c
index 6d1b583aa90f..9d0667879a19 100644
--- a/drivers/gpu/host1x/hw/debug_hw_1x06.c
+++ b/drivers/gpu/host1x/hw/debug_hw_1x06.c
@@ -16,10 +16,23 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
struct output *o)
{
struct host1x_cdma *cdma = &ch->cdma;
+ dma_addr_t dmastart = 0, dmaend = 0;
u32 dmaput, dmaget, dmactrl;
u32 offset, class;
u32 ch_stat;
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && HOST1X_HW >= 6
+ dmastart = host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART_HI);
+ dmastart <<= 32;
+#endif
+ dmastart |= host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART);
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && HOST1X_HW >= 6
+ dmaend = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND_HI);
+ dmaend <<= 32;
+#endif
+ dmaend |= host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND);
+
dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
@@ -41,7 +54,8 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
host1x_debug_output(o, "active class %02x, offset %04x\n",
class, offset);
- host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ host1x_debug_output(o, "DMASTART %pad, DMAEND %pad\n", &dmastart, &dmaend);
+ host1x_debug_output(o, "DMAPUT %08x DMAGET %08x DMACTL %08x\n",
dmaput, dmaget, dmactrl);
host1x_debug_output(o, "CHANNELSTAT %02x\n", ch_stat);
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
index 4fc51f70496b..0a2ab8f1da6f 100644
--- a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
}
#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+ return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+ host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+ return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+ host1x_uclass_wait_syncpt_32_r()
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_uclass.h b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
index 9e84a4adca9f..60c692b92955 100644
--- a/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
}
#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+ return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+ host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+ return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+ host1x_uclass_wait_syncpt_32_r()
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_uclass.h b/drivers/gpu/host1x/hw/hw_host1x05_uclass.h
index aee5a4e32877..2fcc9a2ad3ef 100644
--- a/drivers/gpu/host1x/hw/hw_host1x05_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x05_uclass.h
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
}
#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+ return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+ host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+ return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+ host1x_uclass_wait_syncpt_32_r()
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
index c4bacdb7155f..5f831438d19b 100644
--- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
}
#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+ return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+ host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+ return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+ host1x_uclass_wait_syncpt_32_r()
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
index c74070f3f203..8cd2ef087d5d 100644
--- a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
}
#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+ return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+ host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+ return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+ host1x_uclass_wait_syncpt_32_r()
#endif
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 6d1f3c0fdbe7..45b6be927ec4 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -13,6 +13,7 @@
#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
+#include "fence.h"
#include "intr.h"
/* Wait list management */
@@ -121,12 +122,20 @@ static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
wake_up_interruptible(wq);
}
+static void action_signal_fence(struct host1x_waitlist *waiter)
+{
+ struct host1x_syncpt_fence *f = waiter->data;
+
+ host1x_fence_signal(f);
+}
+
typedef void (*action_handler)(struct host1x_waitlist *waiter);
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
action_submit_complete,
action_wakeup,
action_wakeup_interruptible,
+ action_signal_fence,
};
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index 6ea55e615e3a..e4c346099273 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -33,6 +33,8 @@ enum host1x_intr_action {
*/
HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+ HOST1X_INTR_ACTION_SIGNAL_FENCE,
+
HOST1X_INTR_ACTION_COUNT
};
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index adbdc225de8d..0eef6df7c89e 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -24,21 +24,25 @@
#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
- u32 num_cmdbufs, u32 num_relocs)
+ u32 num_cmdbufs, u32 num_relocs,
+ bool skip_firewall)
{
struct host1x_job *job = NULL;
unsigned int num_unpins = num_relocs;
+ bool enable_firewall;
u64 total;
void *mem;
- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !skip_firewall;
+
+ if (!enable_firewall)
num_unpins += num_cmdbufs;
/* Check that we're not going to overflow */
total = sizeof(struct host1x_job) +
(u64)num_relocs * sizeof(struct host1x_reloc) +
(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
- (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
+ (u64)num_cmdbufs * sizeof(struct host1x_job_cmd) +
(u64)num_unpins * sizeof(dma_addr_t) +
(u64)num_unpins * sizeof(u32 *);
if (total > ULONG_MAX)
@@ -48,6 +52,8 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
if (!job)
return NULL;
+ job->enable_firewall = enable_firewall;
+
kref_init(&job->ref);
job->channel = ch;
@@ -57,8 +63,8 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
mem += num_relocs * sizeof(struct host1x_reloc);
job->unpins = num_unpins ? mem : NULL;
mem += num_unpins * sizeof(struct host1x_job_unpin_data);
- job->gathers = num_cmdbufs ? mem : NULL;
- mem += num_cmdbufs * sizeof(struct host1x_job_gather);
+ job->cmds = num_cmdbufs ? mem : NULL;
+ mem += num_cmdbufs * sizeof(struct host1x_job_cmd);
job->addr_phys = num_unpins ? mem : NULL;
job->reloc_addr_phys = job->addr_phys;
@@ -79,6 +85,13 @@ static void job_free(struct kref *ref)
{
struct host1x_job *job = container_of(ref, struct host1x_job, ref);
+ if (job->release)
+ job->release(job);
+
+ if (job->waiter)
+ host1x_intr_put_ref(job->syncpt->host, job->syncpt->id,
+ job->waiter, false);
+
if (job->syncpt)
host1x_syncpt_put(job->syncpt);
@@ -94,22 +107,38 @@ EXPORT_SYMBOL(host1x_job_put);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset)
{
- struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
+ struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;
gather->words = words;
gather->bo = bo;
gather->offset = offset;
- job->num_gathers++;
+ job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_gather);
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+ bool relative, u32 next_class)
+{
+ struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];
+
+ cmd->is_wait = true;
+ cmd->wait.id = id;
+ cmd->wait.threshold = thresh;
+ cmd->wait.next_class = next_class;
+ cmd->wait.relative = relative;
+
+ job->num_cmds++;
+}
+EXPORT_SYMBOL(host1x_job_add_wait);
+
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
struct host1x_client *client = job->client;
struct device *dev = client->dev;
struct host1x_job_gather *g;
struct iommu_domain *domain;
+ struct sg_table *sgt;
unsigned int i;
int err;
@@ -119,7 +148,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
dma_addr_t phys_addr, *phys;
- struct sg_table *sgt;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
@@ -192,20 +220,23 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
* We will copy gathers BO content later, so there is no need to
* hold and pin them.
*/
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ if (job->enable_firewall)
return 0;
- for (i = 0; i < job->num_gathers; i++) {
+ for (i = 0; i < job->num_cmds; i++) {
size_t gather_size = 0;
struct scatterlist *sg;
- struct sg_table *sgt;
dma_addr_t phys_addr;
unsigned long shift;
struct iova *alloc;
dma_addr_t *phys;
unsigned int j;
- g = &job->gathers[i];
+ if (job->cmds[i].is_wait)
+ continue;
+
+ g = &job->cmds[i].gather;
+
g->bo = host1x_bo_get(g->bo);
if (!g->bo) {
err = -EINVAL;
@@ -296,7 +327,7 @@ static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
if (cmdbuf != reloc->cmdbuf.bo)
continue;
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
+ if (job->enable_firewall) {
target = (u32 *)job->gather_copy_mapped +
reloc->cmdbuf.offset / sizeof(u32) +
g->offset / sizeof(u32);
@@ -538,8 +569,13 @@ static inline int copy_gathers(struct device *host, struct host1x_job *job,
fw.num_relocs = job->num_relocs;
fw.class = job->class;
- for (i = 0; i < job->num_gathers; i++) {
- struct host1x_job_gather *g = &job->gathers[i];
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_gather *g;
+
+ if (job->cmds[i].is_wait)
+ continue;
+
+ g = &job->cmds[i].gather;
size += g->words * sizeof(u32);
}
@@ -561,10 +597,14 @@ static inline int copy_gathers(struct device *host, struct host1x_job *job,
job->gather_copy_size = size;
- for (i = 0; i < job->num_gathers; i++) {
- struct host1x_job_gather *g = &job->gathers[i];
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_gather *g;
void *gather;
+ if (job->cmds[i].is_wait)
+ continue;
+ g = &job->cmds[i].gather;
+
/* Copy the gather */
gather = host1x_bo_mmap(g->bo);
memcpy(job->gather_copy_mapped + offset, gather + g->offset,
@@ -600,28 +640,33 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
if (err)
goto out;
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
+ if (job->enable_firewall) {
err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
/* patch gathers */
- for (i = 0; i < job->num_gathers; i++) {
- struct host1x_job_gather *g = &job->gathers[i];
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_gather *g;
+
+ if (job->cmds[i].is_wait)
+ continue;
+ g = &job->cmds[i].gather;
/* process each gather BO only once */
if (g->handled)
continue;
/* copy_gathers() sets the gather's base when the firewall is enabled */
- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ if (!job->enable_firewall)
g->base = job->gather_addr_phys[i];
- for (j = i + 1; j < job->num_gathers; j++) {
- if (job->gathers[j].bo == g->bo) {
- job->gathers[j].handled = true;
- job->gathers[j].base = g->base;
+ for (j = i + 1; j < job->num_cmds; j++) {
+ if (!job->cmds[j].is_wait &&
+ job->cmds[j].gather.bo == g->bo) {
+ job->cmds[j].gather.handled = true;
+ job->cmds[j].gather.base = g->base;
}
}
@@ -649,8 +694,7 @@ void host1x_job_unpin(struct host1x_job *job)
struct device *dev = unpin->dev ?: host->dev;
struct sg_table *sgt = unpin->sgt;
- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
- unpin->size && host->domain) {
+ if (!job->enable_firewall && unpin->size && host->domain) {
iommu_unmap(host->domain, job->addr_phys[i],
unpin->size);
free_iova(&host->iova,
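
That completes the job.c side: every compile-time IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) test has become the per-job job->enable_firewall flag. One plausible way the allocator derives that flag from the Kconfig default plus a per-submission opt-out (an assumption; this section only shows the flag's consumers):

/* Assumed setter: the firewall stays on whenever the Kconfig option
 * is enabled, unless the caller explicitly asked to skip it.
 */
job->enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		       !skip_firewall;
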
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 94bc2e4ae241..b4428c5495c9 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -18,6 +18,22 @@ struct host1x_job_gather {
bool handled;
};
+struct host1x_job_wait {
+ u32 id;
+ u32 threshold;
+ u32 next_class;
+ bool relative;
+};
+
+struct host1x_job_cmd {
+ bool is_wait;
+
+ union {
+ struct host1x_job_gather gather;
+ struct host1x_job_wait wait;
+ };
+};
+
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
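
struct host1x_job_cmd above is a tagged union: is_wait discriminates between the gather and wait members, which is why every loop patched in job.c tests job->cmds[i].is_wait before touching the gather side. A compact consumer sketch (handle_wait() and handle_gather() are hypothetical helpers):

/* Sketch: dispatch on the tag before reading either union member. */
static void walk_cmds(struct host1x_job_cmd *cmds, unsigned int num_cmds)
{
	unsigned int i;

	for (i = 0; i < num_cmds; i++) {
		if (cmds[i].is_wait)
			handle_wait(cmds[i].wait.id, cmds[i].wait.threshold);
		else
			handle_gather(cmds[i].gather.bo, cmds[i].gather.words);
	}
}
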
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index e648ebbb2027..d198a10848c6 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -407,6 +407,8 @@ static void syncpt_release(struct kref *ref)
atomic_set(&sp->max_val, host1x_syncpt_read(sp));
+ sp->locked = false;
+
mutex_lock(&sp->host->syncpt_mutex);
host1x_syncpt_base_free(sp->base);
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index a6766f8d55ee..95cd29b79d6d 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -40,6 +40,13 @@ struct host1x_syncpt {
/* interrupt data */
struct host1x_syncpt_intr intr;
+
+ /*
+ * If a submission incrementing this syncpoint fails, lock it so that
+ * no further submissions can be made until the application has
+ * handled the failure.
+ */
+ bool locked;
};
/* Initialize sync point array */
@@ -115,4 +122,9 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
return sp->id < host1x_syncpt_nb_pts(sp->host);
}
+static inline void host1x_syncpt_set_locked(struct host1x_syncpt *sp)
+{
+ sp->locked = true;
+}
+
#endif
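
The locked flag sketches a small lifecycle: a failed or timed-out job marks its syncpoint with host1x_syncpt_set_locked(), the submission path is expected to refuse locked syncpoints, and syncpt_release() clears the flag once the last reference drops. A hedged sketch of the submit-side check (submit_allowed() is illustrative; only the setter and the reset appear in this diff):

/* Sketch: a poisoned syncpoint rejects new work until it is released. */
static int submit_allowed(struct host1x_syncpt *sp)
{
	return sp->locked ? -EIO : 0;
}

/* ...and on a fatal submission failure, the error path would do: */
host1x_syncpt_set_locked(job->syncpt);
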