| author    | Dave Airlie | 2019-05-03 10:00:42 +1000 |
|-----------|-------------|---------------------------|
| committer | Dave Airlie | 2019-05-03 10:31:07 +1000 |
| commit    | 422449238e9853458283beffed77562d4b40a2fa (patch) | |
| tree      | 2eec3947c77dcdced46a7e391b49b326c2dc18f5 | |
| parent    | 9f17847d853bbaa6d06510a31bff70716cbb73ab (diff) | |
| parent    | b0fc850fd95f8ecceb601bbb40624da0a8c220a0 (diff) | |
Merge branch 'drm-next-5.2' of git://people.freedesktop.org/~agd5f/linux into drm-next
- SR-IOV fixes
- Raven flickering fix
- Misc spelling fixes
- Vega20 power fixes
- Freesync improvements
- DC fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190502193020.3562-1-alexander.deucher@amd.com
46 files changed, 734 insertions, 473 deletions
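Among the amdgpu/amdkfd hunks below, the per-ASIC `get_fw_version` kfd2kgd callbacks are removed and replaced by a single `amdgpu_amdkfd_get_fw_version()` helper that returns the firmware versions amdgpu already caches at microcode-load time, instead of re-parsing the firmware headers. A minimal sketch of how a KFD-side caller consumes it is shown here; the wrapper function name is hypothetical, while the two calls mirror the `kgd2kfd_device_init()` hunk in kfd_device.c:

```c
/*
 * Sketch (not part of the patch): consuming the new
 * amdgpu_amdkfd_get_fw_version() helper from the KFD side.
 * kfd_cache_fw_versions() is a hypothetical wrapper; the calls
 * themselves follow the kfd_device.c hunk below.
 */
#include "amdgpu_amdkfd.h"
#include "kfd_priv.h"

static void kfd_cache_fw_versions(struct kfd_dev *kfd)
{
	/* MEC1 (compute micro-engine) firmware version, cached by amdgpu */
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
							    KGD_ENGINE_MEC1);

	/* SDMA1 firmware version, read from the cached adev fields as well */
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
							     KGD_ENGINE_SDMA1);
}
```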
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index acf8ae0cee9a..aeead072fa79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -335,6 +335,43 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) amdgpu_bo_unref(&(bo)); } +uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, + enum kgd_engine_type type) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + switch (type) { + case KGD_ENGINE_PFP: + return adev->gfx.pfp_fw_version; + + case KGD_ENGINE_ME: + return adev->gfx.me_fw_version; + + case KGD_ENGINE_CE: + return adev->gfx.ce_fw_version; + + case KGD_ENGINE_MEC1: + return adev->gfx.mec_fw_version; + + case KGD_ENGINE_MEC2: + return adev->gfx.mec2_fw_version; + + case KGD_ENGINE_RLC: + return adev->gfx.rlc_fw_version; + + case KGD_ENGINE_SDMA1: + return adev->sdma.instance[0].fw_version; + + case KGD_ENGINE_SDMA2: + return adev->sdma.instance[1].fw_version; + + default: + return 0; + } + + return 0; +} + void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 775f815f9521..4e37fa7e85b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -81,6 +81,18 @@ struct amdgpu_kfd_dev { uint64_t vram_used; }; +enum kgd_engine_type { + KGD_ENGINE_PFP = 1, + KGD_ENGINE_ME, + KGD_ENGINE_CE, + KGD_ENGINE_MEC1, + KGD_ENGINE_MEC2, + KGD_ENGINE_RLC, + KGD_ENGINE_SDMA1, + KGD_ENGINE_SDMA2, + KGD_ENGINE_MAX +}; + struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, struct mm_struct *mm); bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); @@ -142,6 +154,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr, bool mqd_gfx9); void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); +uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, + enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ff7fac7df34b..fa09e11a600c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -22,14 +22,12 @@ #include <linux/fdtable.h> #include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "cikd.h" #include "cik_sdma.h" -#include "amdgpu_ucode.h" #include "gfx_v7_0.h" #include "gca/gfx_7_2_d.h" #include "gca/gfx_7_2_enum.h" @@ -139,7 +137,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); static void set_scratch_backing_va(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, @@ -191,7 +188,6 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, 
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, @@ -792,63 +788,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd, unlock_srbm(kgd); } -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - const union amdgpu_firmware_header *hdr; - - switch (type) { - case KGD_ENGINE_PFP: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; - break; - - case KGD_ENGINE_ME: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.me_fw->data; - break; - - case KGD_ENGINE_CE: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; - break; - - case KGD_ENGINE_MEC1: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; - break; - - case KGD_ENGINE_MEC2: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; - break; - - case KGD_ENGINE_RLC: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; - break; - - case KGD_ENGINE_SDMA1: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; - break; - - case KGD_ENGINE_SDMA2: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; - break; - - default: - return 0; - } - - if (hdr == NULL) - return 0; - - /* Only 12 bit in use*/ - return hdr->common.ucode_version; -} - static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 56ea929f524b..fec3a6aa1de6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -23,12 +23,10 @@ #include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_ucode.h" #include "gfx_v8_0.h" #include "gca/gfx_8_0_sh_mask.h" #include "gca/gfx_8_0_d.h" @@ -95,7 +93,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); static void set_scratch_backing_va(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, @@ -148,7 +145,6 @@ static const struct kfd2kgd_calls kfd2kgd = { get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, @@ -751,63 +747,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd, unlock_srbm(kgd); } -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - const union amdgpu_firmware_header *hdr; - - switch (type) { - case KGD_ENGINE_PFP: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; - break; - - case KGD_ENGINE_ME: - hdr = (const union amdgpu_firmware_header *) - 
adev->gfx.me_fw->data; - break; - - case KGD_ENGINE_CE: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; - break; - - case KGD_ENGINE_MEC1: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; - break; - - case KGD_ENGINE_MEC2: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; - break; - - case KGD_ENGINE_RLC: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; - break; - - case KGD_ENGINE_SDMA1: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; - break; - - case KGD_ENGINE_SDMA2: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; - break; - - default: - return 0; - } - - if (hdr == NULL) - return 0; - - /* Only 12 bit in use*/ - return hdr->common.ucode_version; -} - static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 5c51d4910650..ef3d93b995b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -25,12 +25,10 @@ #include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_ucode.h" #include "soc15_hw_ip.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" @@ -111,7 +109,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); static void set_scratch_backing_va(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); @@ -158,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = { get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = amdgpu_amdkfd_get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, @@ -874,56 +870,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd, */ } -/* FIXME: Does this need to be ASIC-specific code? 
*/ -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - const union amdgpu_firmware_header *hdr; - - switch (type) { - case KGD_ENGINE_PFP: - hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data; - break; - - case KGD_ENGINE_ME: - hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data; - break; - - case KGD_ENGINE_CE: - hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data; - break; - - case KGD_ENGINE_MEC1: - hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data; - break; - - case KGD_ENGINE_MEC2: - hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data; - break; - - case KGD_ENGINE_RLC: - hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data; - break; - - case KGD_ENGINE_SDMA1: - hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data; - break; - - case KGD_ENGINE_SDMA2: - hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data; - break; - - default: - return 0; - } - - if (hdr == NULL) - return 0; - - /* Only 12 bit in use*/ - return hdr->common.ucode_version; -} - static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d47354997e3c..9ec6356d3f0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3437,7 +3437,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, vram_lost = amdgpu_device_check_vram_lost(tmp_adev); if (vram_lost) { - DRM_ERROR("VRAM is lost!\n"); + DRM_INFO("VRAM is lost due to GPU reset!\n"); atomic_inc(&tmp_adev->vram_lost_counter); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ec9e45004bff..93b2c5a48a71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -88,12 +88,14 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) if (bo->gem_base.import_attach) drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); drm_gem_object_release(&bo->gem_base); - amdgpu_bo_unref(&bo->parent); + /* in case amdgpu_device_recover_vram got NULL of bo->parent */ if (!list_empty(&bo->shadow_list)) { mutex_lock(&adev->shadow_list_lock); list_del_init(&bo->shadow_list); mutex_unlock(&adev->shadow_list_lock); } + amdgpu_bo_unref(&bo->parent); + kfree(bo->metadata); kfree(bo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 4b7a076eea9c..95144e49c7f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -144,7 +144,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (adev->smu.ppt_funcs->get_current_power_state) + if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) pm = amdgpu_smu_get_current_power_state(adev); else if (adev->powerplay.pp_funcs->get_current_power_state) pm = amdgpu_dpm_get_current_power_state(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 7e7f9ed89ee1..7d484fad3909 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -36,6 +36,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) /* enable 
virtual display */ adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; + adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC; adev->cg_flags = 0; adev->pg_flags = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 6a0fcd67662a..aef9d059ae52 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -515,7 +515,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) /* wait until RCV_MSG become 3 */ if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) { - pr_err("failed to recieve FLR_CMPL\n"); + pr_err("failed to receive FLR_CMPL\n"); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 1ec60f54b992..9c88ce513d78 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -156,7 +156,6 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000), }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { @@ -186,7 +185,6 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000), }; static const struct soc15_reg_golden golden_settings_sdma_rv1[] = @@ -851,7 +849,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0); WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); /* enable DMA RB */ @@ -942,7 +940,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_PAGE_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 
1 : 0); WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl); /* enable DMA RB */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index bdb5ad93990d..4900e4958dec 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -470,6 +470,12 @@ static int soc15_asic_reset(struct amdgpu_device *adev) case CHIP_VEGA12: soc15_asic_get_baco_capability(adev, &baco_reset); break; + case CHIP_VEGA20: + if (adev->psp.sos_fw_version >= 0x80067) + soc15_asic_get_baco_capability(adev, &baco_reset); + else + baco_reset = false; + break; default: baco_reset = false; break; @@ -895,7 +901,8 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } else if (adev->pdev->device == 0x15d8) { - adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS | diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index bed78a778e3f..40363ca6c5f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -283,7 +283,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev) } if (vce_v2_0_wait_for_idle(adev)) { - DRM_INFO("VCE is busy, Can't set clock gateing"); + DRM_INFO("VCE is busy, Can't set clock gating"); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index aadc3e66ebd7..f3f5938430d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -382,6 +382,7 @@ static int vce_v4_0_start(struct amdgpu_device *adev) static int vce_v4_0_stop(struct amdgpu_device *adev) { + /* Disable VCPU */ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001); /* hold on ECPU */ @@ -389,8 +390,8 @@ static int vce_v4_0_stop(struct amdgpu_device *adev) VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - /* clear BUSY flag */ - WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK); + /* clear VCE_STATUS */ + WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0); /* Set Clock-Gating off */ /* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) @@ -922,6 +923,7 @@ static int vce_v4_0_set_clockgating_state(void *handle, return 0; } +#endif static int vce_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state) @@ -935,16 +937,11 @@ static int vce_v4_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) - return 0; - if (state == AMD_PG_STATE_GATE) - /* XXX do we need a vce_v4_0_stop()? 
*/ - return 0; + return vce_v4_0_stop(adev); else return vce_v4_0_start(adev); } -#endif static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) @@ -1059,7 +1056,7 @@ const struct amd_ip_funcs vce_v4_0_ip_funcs = { .soft_reset = NULL /* vce_v4_0_soft_reset */, .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */, .set_clockgating_state = vce_v4_0_set_clockgating_state, - .set_powergating_state = NULL /* vce_v4_0_set_powergating_state */, + .set_powergating_state = vce_v4_0_set_powergating_state, }; static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2fee3063a0d6..c1e4d44d6137 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -494,9 +494,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, { unsigned int size; - kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, + kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, KGD_ENGINE_MEC1); - kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, + kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 923cb5ca5a57..1854506e3e8f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -364,6 +364,7 @@ static void dm_vupdate_high_irq(void *interrupt_params) struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; struct dm_crtc_state *acrtc_state; + unsigned long flags; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); @@ -379,8 +380,25 @@ static void dm_vupdate_high_irq(void *interrupt_params) * page-flip completion events that have been queued to us * if a pageflip happened inside front-porch. 
*/ - if (amdgpu_dm_vrr_active(acrtc_state)) + if (amdgpu_dm_vrr_active(acrtc_state)) { drm_crtc_handle_vblank(&acrtc->base); + + /* BTR processing for pre-DCE12 ASICs */ + if (acrtc_state->stream && + adev->family < AMDGPU_FAMILY_AI) { + spin_lock_irqsave(&adev->ddev->event_lock, flags); + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + } + } } } @@ -390,6 +408,7 @@ static void dm_crtc_high_irq(void *interrupt_params) struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; struct dm_crtc_state *acrtc_state; + unsigned long flags; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); @@ -412,9 +431,10 @@ static void dm_crtc_high_irq(void *interrupt_params) */ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); - if (acrtc_state->stream && + if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI && acrtc_state->vrr_params.supported && acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { + spin_lock_irqsave(&adev->ddev->event_lock, flags); mod_freesync_handle_v_update( adev->dm.freesync_module, acrtc_state->stream, @@ -424,6 +444,7 @@ static void dm_crtc_high_irq(void *interrupt_params) adev->dm.dc, acrtc_state->stream, &acrtc_state->vrr_params.adjust); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); } } } @@ -534,6 +555,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; + init_data.flags.power_down_display_on_boot = true; + /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -3438,6 +3461,8 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) dc_stream_retain(state->stream); } + state->active_planes = cur->active_planes; + state->interrupts_enabled = cur->interrupts_enabled; state->vrr_params = cur->vrr_params; state->vrr_infopacket = cur->vrr_infopacket; state->abm_level = cur->abm_level; @@ -3862,7 +3887,20 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc) { } -static bool does_crtc_have_active_plane(struct drm_crtc_state *new_crtc_state) +static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state) +{ + struct drm_device *dev = new_crtc_state->crtc->dev; + struct drm_plane *plane; + + drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) { + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return true; + } + + return false; +} + +static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) { struct drm_atomic_state *state = new_crtc_state->state; struct drm_plane *plane; @@ -3891,7 +3929,32 @@ static bool does_crtc_have_active_plane(struct drm_crtc_state *new_crtc_state) num_active += (new_plane_state->fb != NULL); } - return num_active > 0; + return num_active; +} + +/* + * Sets whether interrupts should be enabled on a specific CRTC. + * We require that the stream be enabled and that there exist active + * DC planes on the stream. 
+ */ +static void +dm_update_crtc_interrupt_state(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) +{ + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + + dm_new_crtc_state->active_planes = 0; + dm_new_crtc_state->interrupts_enabled = false; + + if (!dm_new_crtc_state->stream) + return; + + dm_new_crtc_state->active_planes = + count_crtc_active_planes(new_crtc_state); + + dm_new_crtc_state->interrupts_enabled = + dm_new_crtc_state->active_planes > 0; } static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, @@ -3902,6 +3965,14 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); int ret = -EINVAL; + /* + * Update interrupt state for the CRTC. This needs to happen whenever + * the CRTC has changed or whenever any of its planes have changed. + * Atomic check satisfies both of these requirements since the CRTC + * is added to the state by DRM during drm_atomic_helper_check_planes. + */ + dm_update_crtc_interrupt_state(crtc, state); + if (unlikely(!dm_crtc_state->stream && modeset_required(state, NULL, dm_crtc_state->stream))) { WARN_ON(1); @@ -3912,9 +3983,13 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, if (!dm_crtc_state->stream) return 0; - /* We want at least one hardware plane enabled to use the stream. */ + /* + * We want at least one hardware plane enabled to use + * the stream with a cursor enabled. + */ if (state->enable && state->active && - !does_crtc_have_active_plane(state)) + does_crtc_have_active_cursor(state) && + dm_crtc_state->active_planes == 0) return -EINVAL; if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) @@ -4188,6 +4263,7 @@ static const uint32_t rgb_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565, }; static const uint32_t overlay_formats[] = { @@ -4196,6 +4272,7 @@ static const uint32_t overlay_formats[] = { DRM_FORMAT_RGBA8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565 }; static const u32 cursor_formats[] = { @@ -4999,8 +5076,10 @@ static void update_freesync_state_on_stream( struct dc_plane_state *surface, u32 flip_timestamp_in_us) { - struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; + struct mod_vrr_params vrr_params; struct dc_info_packet vrr_infopacket = {0}; + struct amdgpu_device *adev = dm->adev; + unsigned long flags; if (!new_stream) return; @@ -5013,6 +5092,9 @@ static void update_freesync_state_on_stream( if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; + spin_lock_irqsave(&adev->ddev->event_lock, flags); + vrr_params = new_crtc_state->vrr_params; + if (surface) { mod_freesync_handle_preflip( dm->freesync_module, @@ -5020,6 +5102,12 @@ static void update_freesync_state_on_stream( new_stream, flip_timestamp_in_us, &vrr_params); + + if (adev->family < AMDGPU_FAMILY_AI && + amdgpu_dm_vrr_active(new_crtc_state)) { + mod_freesync_handle_v_update(dm->freesync_module, + new_stream, &vrr_params); + } } mod_freesync_build_vrr_infopacket( @@ -5051,6 +5139,8 @@ static void update_freesync_state_on_stream( new_crtc_state->base.crtc->base.id, (int)new_crtc_state->base.vrr_enabled, (int)vrr_params.state); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); } static void pre_update_freesync_state_on_stream( @@ -5058,8 +5148,10 @@ static void pre_update_freesync_state_on_stream( struct dm_crtc_state *new_crtc_state) { struct dc_stream_state *new_stream = new_crtc_state->stream; - struct mod_vrr_params 
vrr_params = new_crtc_state->vrr_params; + struct mod_vrr_params vrr_params; struct mod_freesync_config config = new_crtc_state->freesync_config; + struct amdgpu_device *adev = dm->adev; + unsigned long flags; if (!new_stream) return; @@ -5071,6 +5163,9 @@ static void pre_update_freesync_state_on_stream( if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; + spin_lock_irqsave(&adev->ddev->event_lock, flags); + vrr_params = new_crtc_state->vrr_params; + if (new_crtc_state->vrr_supported && config.min_refresh_in_uhz && config.max_refresh_in_uhz) { @@ -5091,6 +5186,7 @@ static void pre_update_freesync_state_on_stream( sizeof(vrr_params.adjust)) != 0); new_crtc_state->vrr_params = vrr_params; + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); } static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, @@ -5123,6 +5219,22 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, } } +static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + int i; + + /* + * TODO: Make this per-stream so we don't issue redundant updates for + * commits with multiple streams. + */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) + if (plane->type == DRM_PLANE_TYPE_CURSOR) + handle_cursor_update(plane, old_plane_state); +} + static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dc_state *dc_state, struct drm_device *dev, @@ -5162,6 +5274,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, goto cleanup; } + /* + * Disable the cursor first if we're disabling all the planes. + * It'll remain on the screen after the planes are re-enabled + * if we don't. + */ + if (acrtc_state->active_planes == 0) + amdgpu_dm_commit_cursors(state); + /* update planes when needed */ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; @@ -5205,22 +5325,28 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, continue; } + abo = gem_to_amdgpu_bo(fb->obj[0]); + + /* + * Wait for all fences on this FB. Do limited wait to avoid + * deadlock during GPU reset when this fence will not signal + * but we hold reservation lock for the BO. + */ + r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true, + false, + msecs_to_jiffies(5000)); + if (unlikely(r <= 0)) + DRM_ERROR("Waiting for fences timed out or interrupted!"); + /* * TODO This might fail and hence better not used, wait * explicitly on fences instead * and in general should be called for * blocking commit to as per framework helpers */ - abo = gem_to_amdgpu_bo(fb->obj[0]); r = amdgpu_bo_reserve(abo, true); - if (unlikely(r != 0)) { + if (unlikely(r != 0)) DRM_ERROR("failed to reserve buffer before flip\n"); - WARN_ON(1); - } - - /* Wait for all fences on this FB */ - WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false, - MAX_SCHEDULE_TIMEOUT) < 0); amdgpu_bo_get_tiling_flags(abo, &tiling_flags); @@ -5329,7 +5455,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, } } - if (planes_count) { + /* Update the planes if changed or disable if we don't have any. 
*/ + if (planes_count || acrtc_state->active_planes == 0) { if (new_pcrtc_state->mode_changed) { bundle->stream_update.src = acrtc_state->stream->src; bundle->stream_update.dst = acrtc_state->stream->dst; @@ -5352,15 +5479,72 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, mutex_unlock(&dm->dc_lock); } - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) - if (plane->type == DRM_PLANE_TYPE_CURSOR) - handle_cursor_update(plane, old_plane_state); + /* + * Update cursor state *after* programming all the planes. + * This avoids redundant programming in the case where we're going + * to be disabling a single plane - those pipes are being disabled. + */ + if (acrtc_state->active_planes) + amdgpu_dm_commit_cursors(state); cleanup: kfree(bundle); } /* + * Enable interrupts on CRTCs that are newly active, undergone + * a modeset, or have active planes again. + * + * Done in two passes, based on the for_modeset flag: + * Pass 1: For CRTCs going through modeset + * Pass 2: For CRTCs going from 0 to n active planes + * + * Interrupts can only be enabled after the planes are programmed, + * so this requires a two-pass approach since we don't want to + * just defer the interrupts until after commit planes every time. + */ +static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev, + struct drm_atomic_state *state, + bool for_modeset) +{ + struct amdgpu_device *adev = dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + int i; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + struct dm_crtc_state *dm_old_crtc_state = + to_dm_crtc_state(old_crtc_state); + bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state); + bool run_pass; + + run_pass = (for_modeset && modeset) || + (!for_modeset && !modeset && + !dm_old_crtc_state->interrupts_enabled); + + if (!run_pass) + continue; + + if (!dm_new_crtc_state->interrupts_enabled) + continue; + + manage_dm_interrupts(adev, acrtc, true); + +#ifdef CONFIG_DEBUG_FS + /* The stream has changed so CRC capture needs to re-enabled. */ + if (dm_new_crtc_state->crc_enabled) { + dm_new_crtc_state->crc_enabled = false; + amdgpu_dm_crtc_set_crc_source(crtc, "auto"); + } +#endif + } +} + +/* * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC * @crtc_state: the DRM CRTC state * @stream_state: the DC stream state. @@ -5384,30 +5568,41 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, int i; /* - * We evade vblanks and pflips on crtc that - * should be changed. We do it here to flush & disable - * interrupts before drm_swap_state is called in drm_atomic_helper_commit - * it will update crtc->dm_crtc_state->stream pointer which is used in - * the ISRs. + * We evade vblank and pflip interrupts on CRTCs that are undergoing + * a modeset, being disabled, or have no active planes. + * + * It's done in atomic commit rather than commit tail for now since + * some of these interrupt handlers access the current CRTC state and + * potentially the stream pointer itself. + * + * Since the atomic state is swapped within atomic commit and not within + * commit tail this would leave to new state (that hasn't been committed yet) + * being accesssed from within the handlers. + * + * TODO: Fix this so we can do this in commit tail and not have to block + * in atomic check. 
*/ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - if (drm_atomic_crtc_needs_modeset(new_crtc_state) - && dm_old_crtc_state->stream) { + if (dm_old_crtc_state->interrupts_enabled && + (!dm_new_crtc_state->interrupts_enabled || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { /* - * If the stream is removed and CRC capture was - * enabled on the CRTC the extra vblank reference - * needs to be dropped since CRC capture will be - * disabled. + * Drop the extra vblank reference added by CRC + * capture if applicable. */ - if (!dm_new_crtc_state->stream - && dm_new_crtc_state->crc_enabled) { + if (dm_new_crtc_state->crc_enabled) drm_crtc_vblank_put(crtc); + + /* + * Only keep CRC capture enabled if there's + * still a stream for the CRTC. + */ + if (!dm_new_crtc_state->stream) dm_new_crtc_state->crc_enabled = false; - } manage_dm_interrupts(adev, acrtc, false); } @@ -5623,47 +5818,26 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) mutex_unlock(&dm->dc_lock); } - /* Update freesync state before amdgpu_dm_handle_vrr_transition(). */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - pre_update_freesync_state_on_stream(dm, dm_new_crtc_state); - } - + /* Count number of newly disabled CRTCs for dropping PM refs later. */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - /* - * loop to enable interrupts on newly arrived crtc - */ - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - bool modeset_needed; - + new_crtc_state, i) { if (old_crtc_state->active && !new_crtc_state->active) crtc_disable_count++; dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + /* Update freesync active state. */ + pre_update_freesync_state_on_stream(dm, dm_new_crtc_state); + /* Handle vrr on->off / off->on transitions */ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); - - modeset_needed = modeset_required( - new_crtc_state, - dm_new_crtc_state->stream, - dm_old_crtc_state->stream); - - if (dm_new_crtc_state->stream == NULL || !modeset_needed) - continue; - - manage_dm_interrupts(adev, acrtc, true); - -#ifdef CONFIG_DEBUG_FS - /* The stream has changed so CRC capture needs to re-enabled. */ - if (dm_new_crtc_state->crc_enabled) - amdgpu_dm_crtc_set_crc_source(crtc, "auto"); -#endif } + /* Enable interrupts for CRTCs going through a modeset. */ + amdgpu_dm_enable_crtc_interrupts(dev, state, true); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) wait_for_vblank = false; @@ -5677,6 +5851,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm, crtc, wait_for_vblank); } + /* Enable interrupts for CRTCs going from 0 to n active planes. 
*/ + amdgpu_dm_enable_crtc_interrupts(dev, state, false); /* * send vblank event on all events not handled in flip and diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 3a0b6164c755..978ff14a7d45 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -271,6 +271,9 @@ struct dm_crtc_state { struct drm_crtc_state base; struct dc_stream_state *stream; + int active_planes; + bool interrupts_enabled; + int crc_skip_count; bool crc_enabled; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 3ef68a249f4d..b37ecc3ede61 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -514,6 +514,40 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin } +static void read_edp_current_link_settings_on_detect(struct dc_link *link) +{ + union lane_count_set lane_count_set = { {0} }; + uint8_t link_bw_set; + uint8_t link_rate_set; + + // Read DPCD 00101h to find out the number of lanes currently set + core_link_read_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, sizeof(lane_count_set)); + link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET; + + // Read DPCD 00100h to find if standard link rates are set + core_link_read_dpcd(link, DP_LINK_BW_SET, + &link_bw_set, sizeof(link_bw_set)); + + if (link_bw_set == 0) { + /* If standard link rates are not being used, + * Read DPCD 00115h to find the link rate set used + */ + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + link->cur_link_settings.link_rate = + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->cur_link_settings.link_rate_set = link_rate_set; + link->cur_link_settings.use_link_rate_set = true; + } + } else { + link->cur_link_settings.link_rate = link_bw_set; + link->cur_link_settings.use_link_rate_set = false; + } +} + static bool detect_dp( struct dc_link *link, struct display_sink_capability *sink_caps, @@ -648,9 +682,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return false; } - if (link->connector_signal == SIGNAL_TYPE_EDP && - link->local_sink) - return true; + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* On detect, we want to make sure current link settings are + * up to date, especially if link was powered on by GOP. + */ + read_edp_current_link_settings_on_detect(link); + if (link->local_sink) + return true; + } if (link->connector_signal == SIGNAL_TYPE_LVDS && link->local_sink) @@ -1396,13 +1435,19 @@ static enum dc_status enable_link_dp( /* get link settings for video mode timing */ decide_link_settings(stream, &link_settings); - /* If link settings are different than current and link already enabled - * then need to disable before programming to new rate. - */ - if (link->link_status.link_active && - (link->cur_link_settings.lane_count != link_settings.lane_count || - link->cur_link_settings.link_rate != link_settings.link_rate)) { - dp_disable_link_phy(link, pipe_ctx->stream->signal); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { + /* If link settings are different than current and link already enabled + * then need to disable before programming to new rate. 
+ */ + if (link->link_status.link_active && + (link->cur_link_settings.lane_count != link_settings.lane_count || + link->cur_link_settings.link_rate != link_settings.link_rate)) { + dp_disable_link_phy(link, pipe_ctx->stream->signal); + } + + /*in case it is not on*/ + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); } pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = @@ -1448,15 +1493,9 @@ static enum dc_status enable_link_edp( struct pipe_ctx *pipe_ctx) { enum dc_status status; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - /*in case it is not on*/ - link->dc->hwss.edp_power_control(link, true); - link->dc->hwss.edp_wait_for_hpd_ready(link, true); status = enable_link_dp(state, pipe_ctx); - return status; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index a6424c70f4c5..1ee544a32ebb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2185,6 +2185,30 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) return -1; } +static void read_dp_device_vendor_id(struct dc_link *link) +{ + struct dp_device_vendor_id dp_id; + + /* read IEEE branch device id */ + core_link_read_dpcd( + link, + DP_BRANCH_OUI, + (uint8_t *)&dp_id, + sizeof(dp_id)); + + link->dpcd_caps.branch_dev_id = + (dp_id.ieee_oui[0] << 16) + + (dp_id.ieee_oui[1] << 8) + + dp_id.ieee_oui[2]; + + memmove( + link->dpcd_caps.branch_dev_name, + dp_id.ieee_device_id, + sizeof(dp_id.ieee_device_id)); +} + + + static void get_active_converter_info( uint8_t data, struct dc_link *link) { @@ -2271,27 +2295,6 @@ static void get_active_converter_info( ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); { - struct dp_device_vendor_id dp_id; - - /* read IEEE branch device id */ - core_link_read_dpcd( - link, - DP_BRANCH_OUI, - (uint8_t *)&dp_id, - sizeof(dp_id)); - - link->dpcd_caps.branch_dev_id = - (dp_id.ieee_oui[0] << 16) + - (dp_id.ieee_oui[1] << 8) + - dp_id.ieee_oui[2]; - - memmove( - link->dpcd_caps.branch_dev_name, - dp_id.ieee_device_id, - sizeof(dp_id.ieee_device_id)); - } - - { struct dp_sink_hw_fw_revision dp_hw_fw_revision; core_link_read_dpcd( @@ -2455,6 +2458,8 @@ static bool retrieve_link_cap(struct dc_link *link) ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - DP_DPCD_REV]; + read_dp_device_vendor_id(link); + get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); @@ -2586,9 +2591,6 @@ void detect_edp_sink_caps(struct dc_link *link) uint32_t entry; uint32_t link_rate_in_khz; enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; - union lane_count_set lane_count_set = { {0} }; - uint8_t link_bw_set; - uint8_t link_rate_set; retrieve_link_cap(link); link->dpcd_caps.edp_supported_link_rates_count = 0; @@ -2614,33 +2616,6 @@ void detect_edp_sink_caps(struct dc_link *link) } } link->verified_link_cap = link->reported_link_cap; - - // Read DPCD 00101h to find out the number of lanes currently set - core_link_read_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, sizeof(lane_count_set)); - link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET; - - // Read DPCD 00100h to find if standard link rates are set - core_link_read_dpcd(link, DP_LINK_BW_SET, - &link_bw_set, sizeof(link_bw_set)); - - if (link_bw_set == 0) { - /* If standard link rates are not being used, - * Read DPCD 00115h to find 
the link rate set used - */ - core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); - - if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - link->cur_link_settings.link_rate = - link->dpcd_caps.edp_supported_link_rates[link_rate_set]; - link->cur_link_settings.link_rate_set = link_rate_set; - link->cur_link_settings.use_link_rate_set = true; - } - } else { - link->cur_link_settings.link_rate = link_bw_set; - link->cur_link_settings.use_link_rate_set = false; - } } void dc_link_dp_enable_hpd(const struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index f7f7515f65f4..b0dea759cd86 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -58,6 +58,8 @@ void dp_enable_link_phy( const struct dc_link_settings *link_settings) { struct link_encoder *link_enc = link->link_enc; + struct dc *core_dc = link->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; @@ -84,6 +86,9 @@ void dp_enable_link_phy( } } + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + if (dc_is_dp_sst_signal(signal)) { link_enc->funcs->enable_dp_output( link_enc, @@ -95,6 +100,10 @@ void dp_enable_link_phy( link_settings, clock_source); } + + if (dmcu != NULL && dmcu->funcs->unlock_phy) + dmcu->funcs->unlock_phy(dmcu); + link->cur_link_settings = *link_settings; dp_receiver_power_ctrl(link, true); @@ -150,15 +159,25 @@ bool edp_receiver_ready_T7(struct dc_link *link) void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) { + struct dc *core_dc = link->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; + if (!link->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(link, false); if (signal == SIGNAL_TYPE_EDP) { link->link_enc->funcs->disable_output(link->link_enc, signal); link->dc->hwss.edp_power_control(link, false); - } else + } else { + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + link->link_enc->funcs->disable_output(link->link_enc, signal); + if (dmcu != NULL && dmcu->funcs->unlock_phy) + dmcu->funcs->unlock_phy(dmcu); + } + /* Clear current link setting.*/ memset(&link->cur_link_settings, 0, sizeof(link->cur_link_settings)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index e10479d58c11..96e97d25d639 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -163,6 +163,27 @@ struct dc_stream_state *dc_create_stream_for_sink( return stream; } +struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) +{ + struct dc_stream_state *new_stream; + + new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); + if (!new_stream) + return NULL; + + memcpy(new_stream, stream, sizeof(struct dc_stream_state)); + + if (new_stream->sink) + dc_sink_retain(new_stream->sink); + + if (new_stream->out_transfer_func) + dc_transfer_func_retain(new_stream->out_transfer_func); + + kref_init(&new_stream->refcount); + + return new_stream; +} + /** * dc_stream_get_status_from_state - Get stream status from given dc state * @state: DC state to find the stream status in @@ -312,7 +333,7 @@ bool dc_stream_set_cursor_position( (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || !pipe_ctx->plane_state || (!pipe_ctx->plane_res.xfm && 
!pipe_ctx->plane_res.dpp) || - !pipe_ctx->plane_res.ipp) + (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp)) continue; if (!pipe_to_program) { diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3459e39714bc..70edd9ea5afe 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.26" +#define DC_VER "3.2.27" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -204,6 +204,7 @@ struct dc_config { bool optimize_edp_link_rate; bool disable_fractional_pwm; bool allow_seamless_boot_optimization; + bool power_down_display_on_boot; }; enum visual_confirm { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index cc7ffac64c96..7b9429e30d82 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -120,6 +120,7 @@ struct dc_link { /* MST record stream using this link */ struct link_flags { bool dp_keep_receiver_powered; + bool dp_skip_DID2; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 17fa3bf6cf7b..189bdab929a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -307,6 +307,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream( */ struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); +struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream); + void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 855360b1414f..da96229db53a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -50,7 +50,6 @@ #define MCP_ABM_LEVEL_SET 0x65 #define MCP_ABM_PIPE_SET 0x66 #define MCP_BL_SET 0x67 -#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ #define MCP_DISABLE_ABM_IMMEDIATELY 255 @@ -391,23 +390,6 @@ static bool dce_abm_init_backlight(struct abm *abm) REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); - /* Wait until microcontroller is ready to process interrupt */ - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); - - /* Set PWM fractional enable/disable */ - value = (abm->ctx->dc->config.disable_fractional_pwm == false) ? 
1 : 0; - REG_WRITE(MASTER_COMM_DATA_REG1, value); - - /* Set command to enable or disable fractional PWM microcontroller */ - REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, - MCP_BL_SET_PWM_FRAC); - - /* Notify microcontroller of new command */ - REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); - - /* Ensure command has been executed before continuing */ - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); - return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index aa586672e8cd..818536eea00a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -51,6 +51,9 @@ #define PSR_SET_WAITLOOP 0x31 #define MCP_INIT_DMCU 0x88 #define MCP_INIT_IRAM 0x89 +#define MCP_SYNC_PHY_LOCK 0x90 +#define MCP_SYNC_PHY_UNLOCK 0x91 +#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L static bool dce_dmcu_init(struct dmcu *dmcu) @@ -339,9 +342,32 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu) IRAM_RD_ADDR_AUTO_INC, 0); } +static void dcn10_dmcu_enable_fractional_pwm(struct dmcu *dmcu, + uint32_t fractional_pwm) +{ + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + + /* Wait until microcontroller is ready to process interrupt */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); + + /* Set PWM fractional enable/disable */ + REG_WRITE(MASTER_COMM_DATA_REG1, fractional_pwm); + + /* Set command to enable or disable fractional PWM microcontroller */ + REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, + MCP_BL_SET_PWM_FRAC); + + /* Notify microcontroller of new command */ + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + + /* Ensure command has been executed before continuing */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); +} + static bool dcn10_dmcu_init(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + const struct dc_config *config = &dmcu->ctx->dc->config; bool status = false; /* Definition of DC_DMCU_SCRATCH @@ -379,9 +405,14 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) if (dmcu->dmcu_state == DMCU_RUNNING) { /* Retrieve and cache the DMCU firmware version. */ dcn10_get_dmcu_version(dmcu); + + /* Initialize DMCU to use fractional PWM or not */ + dcn10_dmcu_enable_fractional_pwm(dmcu, + (config->disable_fractional_pwm == false) ? 
1 : 0); status = true; - } else + } else { status = false; + } break; case DMCU_RUNNING: @@ -690,7 +721,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu) return true; } -#endif +#endif //(CONFIG_DRM_AMD_DC_DCN1_0) static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c index 0d9bee8d5ab9..2b2de1d913c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c @@ -151,9 +151,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, struct dc *dc = clk_mgr->ctx->dc; struct dc_debug_options *debug = &dc->debug; struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; - struct pp_smu_display_requirement_rv *smu_req_cur = - &dc->res_pool->pp_smu_req; - struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; struct pp_smu_funcs_rv *pp_smu = NULL; bool send_request_to_increase = false; bool send_request_to_lower = false; @@ -175,8 +172,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, */ if (pp_smu && pp_smu->set_display_count) pp_smu->set_display_count(&pp_smu->pp_smu, display_count); - - smu_req.display_count = display_count; } if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz @@ -187,7 +182,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) { clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; - send_request_to_lower = true; } @@ -197,24 +191,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; - smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; - send_request_to_lower = true; } //DCF Clock if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) { clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; - smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000; - send_request_to_lower = true; } if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; - smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000; - send_request_to_lower = true; } @@ -227,9 +215,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, pp_smu->set_hard_min_dcfclk_by_freq && pp_smu->set_min_deep_sleep_dcfclk) { - pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); - pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); - pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); } } @@ -239,7 +227,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) { dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks); clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; - send_request_to_lower = true; } @@ -249,13 +236,11 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, pp_smu->set_hard_min_dcfclk_by_freq && 
pp_smu->set_min_deep_sleep_dcfclk) { - pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); - pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); - pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); } } - - *smu_req_cur = smu_req; } static const struct clk_mgr_funcs dcn1_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 295cbd5b843f..0db2a6e96fc0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -283,7 +283,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", @@ -310,7 +311,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -323,7 +325,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -337,7 +340,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -348,7 +352,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", @@ -375,7 +380,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( 
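With the cached pp_smu_display_requirement_rv gone, dcn1_update_clocks() converts units at the call site: hard minimums truncate kHz to MHz, while the deep-sleep DCF clock rounds up. A small worked example of that arithmetic, with invented request values:

```c
#include <stdio.h>

/* Worked example of the kHz -> MHz conversions dcn1_update_clocks() now
 * performs inline when talking to the SMU, after dropping the cached
 * pp_smu_display_requirement_rv. Values are made up for illustration. */
int main(void)
{
	int fclk_khz = 1200000;           /* requested fabric clock */
	int dcfclk_khz = 600000;          /* requested DCF clock */
	int dcfclk_deep_sleep_khz = 8100; /* requested deep-sleep DCF clock */

	/* Hard minimums truncate toward zero... */
	printf("hard min fclk   = %d MHz\n", fclk_khz / 1000);
	printf("hard min dcfclk = %d MHz\n", dcfclk_khz / 1000);

	/* ...while the deep-sleep floor rounds up, so 8100 kHz becomes 9 MHz
	 * rather than 8 MHz and never undershoots the request. */
	printf("min deep sleep dcfclk = %d MHz\n",
	       (dcfclk_deep_sleep_khz + 999) / 1000);
	return 0;
}
```

Rounding the deep-sleep floor up keeps the SMU from being asked for less than DC calculated, at the cost of up to 1 MHz of margin.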
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -388,7 +394,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -402,7 +409,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -413,7 +421,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", @@ -440,7 +449,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -453,7 +463,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -467,7 +478,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -478,7 +490,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value 
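The watermark writes above move from REG_WRITE to REG_SET with an explicit base value of 0, so the programmed value goes through per-field mask/shift tables (populated by the new watermark entries in the HUBBUB mask_sh lists further down) instead of being written as a raw 32-bit register. A sketch of what that field merge amounts to, with an invented mask and shift:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustration of the field merge behind REG_SET(reg, 0, FIELD, value).
 * The mask and shift below are invented for the example; the real ones come
 * from the generated sh_mask headers via the mask_sh lists. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t value)
{
	return (reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
	const uint32_t URGENCY_WATERMARK_MASK = 0x001fffff; /* hypothetical */
	const uint32_t URGENCY_WATERMARK_SHIFT = 0;

	uint32_t prog_wm_value = 0x1234;  /* as produced by convert_and_clamp() */

	/* Start from an explicit base of 0, then merge the field in. */
	uint32_t reg = set_field(0, URGENCY_WATERMARK_MASK,
				 URGENCY_WATERMARK_SHIFT, prog_wm_value);

	printf("programmed watermark register = 0x%08x\n", reg);
	return 0;
}
```

For a field that covers the whole register the result matches the old REG_WRITE; the practical difference is that field widths and positions now come from the per-ASIC headers rather than being implicit in the raw write.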
= convert_and_clamp(watermarks->d.urgent_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", @@ -505,7 +518,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -518,7 +532,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -532,7 +547,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -867,6 +883,7 @@ static const struct hubbub_funcs hubbub1_funcs = { .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap, .wm_read_state = hubbub1_wm_read_state, + .program_watermarks = hubbub1_program_watermarks, }; void hubbub1_construct(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 9cd4a5194154..85811b24a497 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -32,18 +32,14 @@ #define TO_DCN10_HUBBUB(hubbub)\ container_of(hubbub, struct dcn10_hubbub, base) -#define HUBHUB_REG_LIST_DCN()\ +#define HUBBUB_REG_LIST_DCN_COMMON()\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\ @@ -54,6 +50,12 @@ SR(DCHUBBUB_TEST_DEBUG_DATA),\ SR(DCHUBBUB_SOFT_RESET) +#define HUBBUB_VM_REG_LIST() \ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ + 
SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D) + #define HUBBUB_SR_WATERMARK_REG_LIST()\ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\ @@ -65,7 +67,8 @@ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D) #define HUBBUB_REG_LIST_DCN10(id)\ - HUBHUB_REG_LIST_DCN(), \ + HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_VM_REG_LIST(), \ HUBBUB_SR_WATERMARK_REG_LIST(), \ SR(DCHUBBUB_SDPIF_FB_TOP),\ SR(DCHUBBUB_SDPIF_FB_BASE),\ @@ -122,8 +125,7 @@ struct dcn_hubbub_registers { #define HUBBUB_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix - -#define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\ +#define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ @@ -133,10 +135,29 @@ struct dcn_hubbub_registers { HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh) + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh) + +#define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh) #define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN(mask_sh), \ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \ HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \ 
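The header reshuffle above (HUBBUB_REG_LIST_DCN_COMMON, HUBBUB_VM_REG_LIST, HUBBUB_MASK_SH_LIST_STUTTER) leans on the usual DC list-macro technique: one field list expanded twice, once against the generated *__SHIFT constants and once against the *_MASK constants. A self-contained sketch of that expansion with made-up register constants:

```c
#include <stdint.h>

/* Sketch of the mask/shift list technique the header split builds on.
 * REG_A__FIELD_X__SHIFT / _MASK are invented; in the driver they come from
 * the generated sh_mask headers. */
#define REG_A__FIELD_X__SHIFT 0x0
#define REG_A__FIELD_X_MASK   0x001fffff

#define DEMO_SF(reg_name, field_name, post_fix)\
	.field_name = reg_name ## __ ## field_name ## post_fix

#define DEMO_FIELD_LIST(mask_sh)\
	DEMO_SF(REG_A, FIELD_X, mask_sh)

struct demo_shift { uint8_t FIELD_X; };
struct demo_mask  { uint32_t FIELD_X; };

/* The same list fills both tables, so adding a field means one new line. */
static const struct demo_shift shift = { DEMO_FIELD_LIST(__SHIFT) };
static const struct demo_mask  mask  = { DEMO_FIELD_LIST(_MASK) };
```

Splitting the lists into common, VM and stutter groups presumably lets a hubbub variant pull in only the groups its hardware has, without duplicating the shared entries.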
HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \ @@ -167,15 +188,35 @@ struct dcn_hubbub_registers { type FB_OFFSET;\ type AGP_BOT;\ type AGP_TOP;\ - type AGP_BASE + type AGP_BASE;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D + +#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D struct dcn_hubbub_shift { DCN_HUBBUB_REG_FIELD_LIST(uint8_t); + HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); }; struct dcn_hubbub_mask { DCN_HUBBUB_REG_FIELD_LIST(uint32_t); + HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); }; struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 0ba68d41b9c3..54b219a710d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1178,6 +1178,10 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst) REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst); } +void hubp1_init(struct hubp *hubp) +{ + //do nothing +} static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_program_surface_flip_and_addr = hubp1_program_surface_flip_and_addr, @@ -1201,7 +1205,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_clear_underflow = hubp1_clear_underflow, .hubp_disable_control = hubp1_disable_control, .hubp_get_underflow_status = hubp1_get_underflow_status, - + .hubp_init = hubp1_init, }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index db98ba361686..99d2b7e2a578 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -34,6 +34,7 @@ #define HUBP_REG_LIST_DCN(id)\ SRI(DCHUBP_CNTL, HUBP, id),\ SRI(HUBPREQ_DEBUG_DB, HUBP, id),\ + SRI(HUBPREQ_DEBUG, HUBP, id),\ SRI(DCSURF_ADDR_CONFIG, HUBP, id),\ SRI(DCSURF_TILING_CONFIG, HUBP, id),\ SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\ @@ -138,6 +139,7 @@ #define HUBP_COMMON_REG_VARIABLE_LIST \ uint32_t DCHUBP_CNTL; \ uint32_t HUBPREQ_DEBUG_DB; \ + uint32_t HUBPREQ_DEBUG; \ uint32_t DCSURF_ADDR_CONFIG; \ uint32_t DCSURF_TILING_CONFIG; \ uint32_t DCSURF_SURFACE_PITCH; \ @@ -749,4 +751,6 @@ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); void hubp1_vready_workaround(struct hubp *hubp, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); +void hubp1_init(struct hubp *hubp); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index dab370676bfa..33d311cea28c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1118,14 +1118,17 @@ static void dcn10_init_hw(struct dc *dc) * Otherwise, if taking control is not 
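The new hubp1_init() above is an intentionally empty hook wired into dcn10_hubp_funcs, so hardware init can call ->hubp_init() on every pipe without a NULL check and later HUBP revisions can override it. A stripped-down sketch of that pattern, with stand-in types rather than the real struct hubp / hubp_funcs:

```c
/* Stand-in types for struct hubp / hubp_funcs; illustration only. */
struct demo_hubp;

struct demo_hubp_funcs {
	void (*hubp_init)(struct demo_hubp *hubp);
};

struct demo_hubp {
	const struct demo_hubp_funcs *funcs;
};

static void demo_hubp1_init(struct demo_hubp *hubp)
{
	/* do nothing: placeholder a later HUBP revision can override */
	(void)hubp;
}

static const struct demo_hubp_funcs demo_dcn10_hubp_funcs = {
	.hubp_init = demo_hubp1_init,
};

/* Call site, as in dcn10_init_hw(): no NULL check, because every revision
 * provides at least the no-op. */
static void demo_init_pipe(struct demo_hubp *hubp)
{
	hubp->funcs->hubp_init(hubp);
}
```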
possible, we need to power * everything down. */ - if (dcb->funcs->is_accelerated_mode(dcb)) { + if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct hubp *hubp = dc->res_pool->hubps[i]; struct dpp *dpp = dc->res_pool->dpps[i]; + hubp->funcs->hubp_init(hubp); dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; plane_atomic_power_down(dc, dpp, hubp); } + + apply_DEGVIDCN10_253_wa(dc); } for (i = 0; i < dc->res_pool->audio_count; i++) { @@ -2436,6 +2439,8 @@ static void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context) { + struct hubbub *hubbub = dc->res_pool->hubbub; + if (dc->debug.sanity_checks) dcn10_verify_allow_pstate_change_high(dc); @@ -2449,7 +2454,7 @@ static void dcn10_prepare_bandwidth( false); } - hubbub1_program_watermarks(dc->res_pool->hubbub, + hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); @@ -2466,6 +2471,8 @@ static void dcn10_optimize_bandwidth( struct dc *dc, struct dc_state *context) { + struct hubbub *hubbub = dc->res_pool->hubbub; + if (dc->debug.sanity_checks) dcn10_verify_allow_pstate_change_high(dc); @@ -2479,7 +2486,7 @@ static void dcn10_optimize_bandwidth( true); } - hubbub1_program_watermarks(dc->res_pool->hubbub, + hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index cc6891b8ea69..4fc4208d1472 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -74,29 +74,6 @@ struct pp_smu_wm_range_sets { struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS]; }; -struct pp_smu_display_requirement_rv { - /* PPSMC_MSG_SetDisplayCount: count - * 0 triggers S0i2 optimization - */ - unsigned int display_count; - - /* PPSMC_MSG_SetHardMinFclkByFreq: mhz - * FCLK will vary with DPM, but never below requested hard min - */ - unsigned int hard_min_fclk_mhz; - - /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz - * fixed clock at requested freq, either from FCH bypass or DFS - */ - unsigned int hard_min_dcefclk_mhz; - - /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz - * when DF is in cstate, dcf clock is further divided down - * to just above given frequency - */ - unsigned int min_deep_sleep_dcefclk_mhz; -}; - struct pp_smu_funcs_rv { struct pp_smu pp_smu; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 88a82a23d259..6f5ab05d6467 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -145,7 +145,6 @@ struct resource_pool { struct hubbub *hubbub; struct mpc *mpc; struct pp_smu_funcs *pp_smu; - struct pp_smu_display_requirement_rv pp_smu_req; struct dce_aux *engines[MAX_PIPES]; struct dce_i2c_hw *hw_i2cs[MAX_PIPES]; struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 5e8fead3c09a..93667e8b23b3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -77,6 +77,12 @@ struct hubbub_funcs { void (*get_dchub_ref_freq)(struct hubbub *hubbub, unsigned int dccg_ref_freq_inKhz, unsigned int *dchub_ref_freq_inKhz); + + void 
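dcn10_init_hw() now takes the power-down branch not only when the VBIOS left the ASIC in accelerated mode but also when dc->config.power_down_display_on_boot is set: each HUBP gets its init hook called, the plane blocks are powered down, and the DEGVIDCN10_253 workaround is applied afterwards. A simplified sketch of that decision flow; every name below is a stand-in for the real dc structures, not the driver API:

```c
#include <stdbool.h>

struct demo_pipe { int id; };

/* Placeholders for is_accelerated_mode(), hubp_init(),
 * plane_atomic_power_down() and apply_DEGVIDCN10_253_wa(). */
static bool vbios_accelerated_mode(void)            { return false; }
static void pipe_hubp_init(struct demo_pipe *p)     { (void)p; }
static void pipe_plane_power_down(struct demo_pipe *p) { (void)p; }
static void apply_degvidcn10_253_wa(void)           { }

static void demo_init_hw(struct demo_pipe *pipes, int pipe_count,
			 bool power_down_display_on_boot)
{
	/* Power the pipes down if firmware left them running, or if the
	 * platform asked for a clean display state on boot. */
	if (vbios_accelerated_mode() || power_down_display_on_boot) {
		for (int i = 0; i < pipe_count; i++) {
			pipe_hubp_init(&pipes[i]);        /* new per-pipe hook */
			pipe_plane_power_down(&pipes[i]); /* gate DPP/HUBP     */
		}
		/* Workaround runs once the planes are known to be off. */
		apply_degvidcn10_253_wa();
	}
}
```

The same hunk also routes watermark programming through hubbub->funcs->program_watermarks, which is what the new function pointer added to hubbub_funcs below exists for.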
(*program_watermarks)( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index cbaa43853611..c68f0ce346c7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -70,6 +70,8 @@ struct dmcu_funcs { void (*get_psr_wait_loop)(struct dmcu *dmcu, unsigned int *psr_wait_loop_number); bool (*is_dmcu_initialized)(struct dmcu *dmcu); + bool (*lock_phy)(struct dmcu *dmcu); + bool (*unlock_phy)(struct dmcu *dmcu); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 1cd07e94ee63..455df4999797 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -130,6 +130,7 @@ struct hubp_funcs { void (*hubp_clear_underflow)(struct hubp *hubp); void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); + void (*hubp_init)(struct hubp *hubp); }; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 3d867e34f8b3..19b1eaebe484 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -437,10 +437,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync, inserted_frame_duration_in_us = last_render_time_in_us / frames_to_insert; - if (inserted_frame_duration_in_us < - (1000000 / in_out_vrr->max_refresh_in_uhz)) - inserted_frame_duration_in_us = - (1000000 / in_out_vrr->max_refresh_in_uhz); + if (inserted_frame_duration_in_us < in_out_vrr->min_duration_in_us) + inserted_frame_duration_in_us = in_out_vrr->min_duration_in_us; /* Cache the calculated variables */ in_out_vrr->btr.inserted_duration_in_us = diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h index 721c61171045..5a44e614ab7e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h @@ -2347,6 +2347,8 @@ #define mmHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2 #define mmHUBP0_HUBPREQ_DEBUG_DB 0x0569 #define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP0_HUBPREQ_DEBUG 0x056a +#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x056e #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x056f @@ -2631,6 +2633,8 @@ #define mmHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2 #define mmHUBP1_HUBPREQ_DEBUG_DB 0x062d #define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP1_HUBPREQ_DEBUG 0x062e +#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0632 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0633 @@ -2915,6 +2919,8 @@ #define mmHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2 #define mmHUBP2_HUBPREQ_DEBUG_DB 0x06f1 #define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP2_HUBPREQ_DEBUG 0x06f2 +#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06f6 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06f7 @@ -3199,6 +3205,8 @@ #define mmHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2 
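The below-the-range clamp in freesync.c now uses the precomputed min_duration_in_us instead of re-deriving a floor from 1000000 / max_refresh_in_uhz. Assuming the field really is a refresh rate in micro-hertz, as the name suggests, the old expression mixes units and truncates to zero for any realistic rate, which would mean the clamp effectively never fired. A quick check:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical check of the old clamp in apply_below_the_range(). Assumes
 * max_refresh_in_uhz is in micro-hertz (144 Hz -> 144,000,000 uHz). */
int main(void)
{
	uint64_t max_refresh_in_uhz = 144000000ULL;  /* 144 Hz cap */

	/* Old lower bound: 1,000,000 / uHz truncates to 0 for any real rate. */
	uint64_t old_floor_us = 1000000ULL / max_refresh_in_uhz;

	/* Frame time at the cap with consistent units: 10^12 / uHz. */
	uint64_t min_duration_in_us = 1000000000000ULL / max_refresh_in_uhz;

	printf("old floor = %llu us, min frame duration = %llu us\n",
	       (unsigned long long)old_floor_us,
	       (unsigned long long)min_duration_in_us);  /* 0 vs ~6944 us */
	return 0;
}
```

Clamping to min_duration_in_us keeps inserted frames from being scheduled shorter than the panel's minimum frame time, which appears to be the intent of the original check.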
#define mmHUBP3_HUBPREQ_DEBUG_DB 0x07b5 #define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP3_HUBPREQ_DEBUG 0x07b6 +#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07ba #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07bb diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 08769b4b7a74..d3075adb3297 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -718,6 +718,7 @@ enum atom_encoder_caps_def ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not. ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board. + ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type. }; struct atom_encoder_caps_record diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 5f3c10ebff08..b897aca9b4c9 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -85,18 +85,6 @@ enum kgd_memory_pool { KGD_POOL_FRAMEBUFFER = 3, }; -enum kgd_engine_type { - KGD_ENGINE_PFP = 1, - KGD_ENGINE_ME, - KGD_ENGINE_CE, - KGD_ENGINE_MEC1, - KGD_ENGINE_MEC2, - KGD_ENGINE_RLC, - KGD_ENGINE_SDMA1, - KGD_ENGINE_SDMA2, - KGD_ENGINE_MAX -}; - /** * enum kfd_sched_policy * @@ -230,8 +218,6 @@ struct tile_config { * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that * SDMA hqd slot. * - * @get_fw_version: Returns FW versions from the header - * * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID. 
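ATOM_ENCODER_CAP_RECORD_USB_C_TYPE above extends the encoder caps bitmask, so consumers would test it the same way as the existing HBR2/HBR3 flags. A tiny illustration; has_usb_c_connector() is a hypothetical helper, not a driver API:

```c
#include <stdbool.h>
#include <stdint.h>

/* New cap bit from atomfirmware.h; the caps record is a bitmask. */
#define ATOM_ENCODER_CAP_RECORD_USB_C_TYPE 0x100

static bool has_usb_c_connector(uint32_t encodercaps)
{
	return (encodercaps & ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) != 0;
}
```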
* Only used for no cp scheduling mode * @@ -311,8 +297,6 @@ struct kfd2kgd_calls { struct kgd_dev *kgd, uint8_t vmid); - uint16_t (*get_fw_version)(struct kgd_dev *kgd, - enum kgd_engine_type type); void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f32e3d0aaea6..9a595f7525e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -35,6 +35,7 @@ #include "smu10_hwmgr.h" #include "power_state.h" #include "soc15_common.h" +#include "smu10.h" #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ @@ -204,18 +205,13 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) return 0; } -static inline uint32_t convert_10k_to_mhz(uint32_t clock) -{ - return (clock + 99) / 100; -} - static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); if (smu10_data->need_min_deep_sleep_dcefclk && - smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) { - smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock); + smu10_data->deep_sleep_dcefclk != clock) { + smu10_data->deep_sleep_dcefclk = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, smu10_data->deep_sleep_dcefclk); @@ -228,8 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); if (smu10_data->dcf_actual_hard_min_freq && - smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) { - smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock); + smu10_data->dcf_actual_hard_min_freq != clock) { + smu10_data->dcf_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinDcefclkByFreq, smu10_data->dcf_actual_hard_min_freq); @@ -242,8 +238,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); if (smu10_data->f_actual_hard_min_freq && - smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) { - smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock); + smu10_data->f_actual_hard_min_freq != clock) { + smu10_data->f_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, smu10_data->f_actual_hard_min_freq); @@ -572,7 +568,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { struct smu10_hwmgr *data = hwmgr->backend; - struct amdgpu_device *adev = hwmgr->adev; uint32_t min_sclk = hwmgr->display_config->min_core_set_clock; uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100; @@ -581,11 +576,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, return 0; } - /* Disable UMDPSTATE support on rv2 temporarily */ - if ((adev->asic_type == CHIP_RAVEN) && - (adev->rev_id >= 8)) - return 0; - if (min_sclk < data->gfx_min_freq_limit) min_sclk = data->gfx_min_freq_limit; @@ -1200,6 +1190,94 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) } } +static int conv_power_profile_to_pplib_workload(int power_profile) +{ + int 
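smu10_hwmgr.c drops convert_10k_to_mhz(), whose (clock + 99) / 100 rounded a value expressed in 10 kHz units up to MHz; the hard-minimum and deep-sleep setters now store whatever the caller passes, which on the DC side of this series is already MHz. A small comparison of the two paths, with invented sample values:

```c
#include <stdio.h>

/* The removed helper, reproduced here only for comparison. */
static unsigned int convert_10k_to_mhz(unsigned int clock)
{
	return (clock + 99) / 100;   /* round a 10 kHz-unit value up to MHz */
}

int main(void)
{
	unsigned int request_in_10khz = 812; /* 8.12 MHz in 10 kHz units */
	unsigned int request_in_mhz = 9;     /* same request, already in MHz */

	printf("old path: %u (10 kHz units) -> %u MHz\n",
	       request_in_10khz, convert_10k_to_mhz(request_in_10khz));
	printf("new path: caller passes %u MHz, stored as-is\n",
	       request_in_mhz);
	return 0;
}
```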
pplib_workload = 0; + + switch (power_profile) { + case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: + pplib_workload = WORKLOAD_DEFAULT_BIT; + break; + case PP_SMC_POWER_PROFILE_FULLSCREEN3D: + pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; + break; + case PP_SMC_POWER_PROFILE_POWERSAVING: + pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; + break; + case PP_SMC_POWER_PROFILE_VIDEO: + pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; + break; + case PP_SMC_POWER_PROFILE_VR: + pplib_workload = WORKLOAD_PPLIB_VR_BIT; + break; + case PP_SMC_POWER_PROFILE_COMPUTE: + pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; + break; + } + + return pplib_workload; +} + +static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) +{ + uint32_t i, size = 0; + static const uint8_t + profile_mode_setting[6][4] = {{70, 60, 0, 0,}, + {70, 60, 1, 3,}, + {90, 60, 0, 0,}, + {70, 60, 0, 0,}, + {70, 90, 0, 0,}, + {30, 60, 0, 6,}, + }; + static const char *profile_name[6] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE"}; + static const char *title[6] = {"NUM", + "MODE_NAME", + "BUSY_SET_POINT", + "FPS", + "USE_RLC_BUSY", + "MIN_ACTIVE_LEVEL"}; + + if (!buf) + return -EINVAL; + + size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0], + title[1], title[2], title[3], title[4], title[5]); + + for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++) + size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", + i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", + profile_mode_setting[i][0], profile_mode_setting[i][1], + profile_mode_setting[i][2], profile_mode_setting[i][3]); + + return size; +} + +static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) +{ + int workload_type = 0; + + if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) { + pr_err("Invalid power profile mode %ld\n", input[size]); + return -EINVAL; + } + hwmgr->power_profile_mode = input[size]; + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = + conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify, + 1 << workload_type); + + return 0; +} + + static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .backend_init = smu10_hwmgr_backend_init, .backend_fini = smu10_hwmgr_backend_fini, @@ -1241,6 +1319,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .powergate_sdma = smu10_powergate_sdma, .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq, .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq, + .get_power_profile_mode = smu10_get_power_profile_mode, + .set_power_profile_mode = smu10_set_power_profile_mode, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 39a547084e90..9b9f87b84910 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -3460,7 +3460,18 @@ static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) return ; data->vce_power_gated = bgate; - vega20_enable_disable_vce_dpm(hwmgr, !bgate); + if (bgate) { + vega20_enable_disable_vce_dpm(hwmgr, !bgate); + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + } else { + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + 
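smu10_set_power_profile_mode() validates the requested index, maps it through conv_power_profile_to_pplib_workload(), and sends a one-hot mask of that bit as the PPSMC_MSG_ActiveProcessNotify parameter. A worked example using the renumbered workload defines from smu10.h further down in this diff:

```c
#include <stdio.h>

/* Renumbered workload bits from smu10.h (see the hunk below). */
#define WORKLOAD_DEFAULT_BIT              0
#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
#define WORKLOAD_PPLIB_POWER_SAVING_BIT   2
#define WORKLOAD_PPLIB_VIDEO_BIT          3
#define WORKLOAD_PPLIB_VR_BIT             4
#define WORKLOAD_PPLIB_COMPUTE_BIT        5

int main(void)
{
	/* Selecting the VIDEO profile maps to WORKLOAD_PPLIB_VIDEO_BIT... */
	int workload_type = WORKLOAD_PPLIB_VIDEO_BIT;

	/* ...and the SMU message parameter is a one-hot mask of that bit. */
	printf("ActiveProcessNotify parameter = 0x%x\n", 1 << workload_type);
	return 0;
}
```

For the VIDEO profile this prints 0x8, which is the value the hwmgr hands to smum_send_msg_to_smc_with_parameter().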
vega20_enable_disable_vce_dpm(hwmgr, !bgate); + } + } static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h index a2991fa2e6f8..90879e4092a3 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h @@ -85,7 +85,6 @@ #define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 #define PPSMC_Message_Count 0x37 - typedef uint16_t PPSMC_Result; typedef int PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/powerplay/inc/smu10.h index 9e837a5014c5..b96520528240 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu10.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu10.h @@ -136,12 +136,14 @@ #define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT) /* Workload bits */ -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 -#define WORKLOAD_PPLIB_VIDEO_BIT 2 -#define WORKLOAD_PPLIB_VR_BIT 3 -#define WORKLOAD_PPLIB_COMPUTE_BIT 4 -#define WORKLOAD_PPLIB_CUSTOM_BIT 5 -#define WORKLOAD_PPLIB_COUNT 6 +#define WORKLOAD_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_COUNT 7 typedef struct { /* MP1_EXT_SCRATCH0 */ diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c478b38662d0..92903a4cc4d8 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1896,8 +1896,13 @@ set_fan_speed_rpm_failed: static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate) { - /* send msg to SMU to set pstate */ - return 0; + int ret = 0; + mutex_lock(&(smu->mutex)); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetXgmiMode, + pstate ? XGMI_STATE_D0 : XGMI_STATE_D3); + mutex_unlock(&(smu->mutex)); + return ret; } static const struct smu_funcs smu_v11_0_funcs = { |
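vega20_power_gate_vce() above makes the gate and ungate paths asymmetric: VCE DPM is disabled before the block is power-gated, and on the way back up the block is ungated before DPM is re-enabled. A sketch of that ordering; the rationale in the comments is an inference from the ordering, not something the diff states, and both helpers are placeholders for vega20_enable_disable_vce_dpm() and amdgpu_device_ip_set_powergating_state():

```c
#include <stdbool.h>

static void vce_dpm_enable(bool enable) { (void)enable; }  /* placeholder */
static void vce_powergate(bool gate)    { (void)gate; }    /* placeholder */

static void demo_power_gate_vce(bool bgate)
{
	if (bgate) {
		/* Gating: stop VCE DPM first, then gate the IP block, so DPM
		 * is never driving a block that is already powered down. */
		vce_dpm_enable(false);
		vce_powergate(true);
	} else {
		/* Ungating: bring the block back up before re-enabling DPM. */
		vce_powergate(false);
		vce_dpm_enable(true);
	}
}
```

The smu_v11_0_set_xgmi_pstate() hunk in the same range follows the series' other fix pattern: instead of returning success without doing anything, it now actually sends SMU_MSG_SetXgmiMode with the D0 or D3 state under the smu mutex.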