author     Linus Torvalds   2022-11-18 13:31:40 -0800
committer  Linus Torvalds   2022-11-18 13:31:40 -0800
commit     b5bf1d8a23a683d56be574a934a8296912efc758 (patch)
tree       1ead6c0ef909964d50b2801ae68628fcf3b92dbc
parent     ab290eaddc4c41b237b9a366fa6a5527be890b84 (diff)
parent     b1010b93fe9074f965ebf86e0e382a8a7a71500d (diff)
Merge tag 'drm-fixes-2022-11-19' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
 "I guess the main question is are things settling down, and I'd say
  kinda: these are all pretty small fixes, nothing big stands out
  really, there just seem to be quite a few of them.

  Mostly amdgpu and core fixes, with some i915, tegra, vc4 and panel bits.

  core:
   - Fix potential memory leak in drm_dev_init()
   - Fix potential null-ptr-deref in drm_vblank_destroy_worker()
   - Revert hiding unregistered connectors from userspace, as it breaks
     on DP-MST
   - Add workaround for DP++ dual mode adaptors that don't support i2c
     subaddressing

  i915:
   - Fix uaf with lmem_userfault_list handling

  amdgpu:
   - gang submit fixes
   - Fix a possible memory leak in gang submit error path
   - DP tunneling fixes
   - DCN 3.1 page flip fix
   - DCN 3.2.x fixes
   - DCN 3.1.4 fixes
   - Don't expose degamma on hardware that doesn't support it
   - BACO fixes for SMU 11.x
   - BACO fixes for SMU 13.x
   - Virtual display fix for devices with no display hardware

  amdkfd:
   - Memory limit regression fix

  tegra:
   - tegra20 GART fix

  vc4:
   - Fix error handling in vc4_atomic_commit_tail()

  lima:
   - Set lima's clkname correctly when regulator is missing

  panel:
   - Set bpc for logictechno panels"

* tag 'drm-fixes-2022-11-19' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  gpu: host1x: Avoid trying to use GART on Tegra20
  drm/display: Don't assume dual mode adaptors support i2c sub-addressing
  drm/amd/pm: fix SMU13 runpm hang due to unintentional workaround
  drm/amd/pm: enable runpm support over BACO for SMU13.0.7
  drm/amd/pm: enable runpm support over BACO for SMU13.0.0
  drm/amdgpu: there is no vbios fb on devices with no display hw (v2)
  drm/amdkfd: Fix a memory limit issue
  drm/amdgpu: disable BACO support on more cards
  drm/amd/display: don't enable DRM CRTC degamma property for DCE
  drm/amd/display: Set max for prefetch lines on dcn32
  drm/amd/display: use uclk pstate latency for fw assisted mclk validation dcn32
  drm/amd/display: Fix prefetch calculations for dcn32
  drm/amd/display: Fix optc2_configure warning on dcn314
  drm/amd/display: Fix calculation for cursor CAB allocation
  Revert "drm: hide unregistered connectors from GETCONNECTOR IOCTL"
  drm/amd/display: Support parsing VRAM info v3.0 from VBIOS
  drm/amd/display: Fix invalid DPIA AUX reply causing system hang
  drm/amdgpu: Add psp_13_0_10_ta firmware to modinfo
  drm/amd/display: Add HUBP surface flip interrupt handler
  drm/amd/display: Fix access timeout to DPIA AUX at boot time
  ...
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 23
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 30
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 30
-rw-r--r--  drivers/gpu/drm/display/drm_dp_dual_mode_helper.c | 51
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 3
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/lima/lima_devfreq.c | 15
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 8
-rw-r--r--  drivers/gpu/host1x/dev.c | 4
37 files changed, 310 insertions, 116 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8639a4f9c6e8..2eca58220550 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1293,6 +1293,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 84f44f7e4111..6d1ff7b9258d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -171,9 +171,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && adev->kfd.vram_used + vram_needed >
- adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size) -
- reserved_for_pt)) {
+ adev->gmc.real_vram_size - reserved_for_pt)) {
ret = -ENOMEM;
goto release;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d371000a5727..d038b258cc92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -109,6 +109,7 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
return r;
++(num_ibs[r]);
+ p->gang_leader_idx = r;
return 0;
}
@@ -287,8 +288,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size)
- return -EINVAL;
+ if (!p->gang_size) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
@@ -300,7 +303,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (ret)
goto free_all_kdata;
}
- p->gang_leader = p->jobs[p->gang_size - 1];
+ p->gang_leader = p->jobs[p->gang_leader_idx];
if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
ret = -ECANCELED;
@@ -1195,16 +1198,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return r;
}
- for (i = 0; i < p->gang_size - 1; ++i) {
+ for (i = 0; i < p->gang_size; ++i) {
+ if (p->jobs[i] == leader)
+ continue;
+
r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
if (r)
return r;
}
- r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r && r != -ERESTARTSYS)
DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-
return r;
}
@@ -1238,9 +1243,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
for (i = 0; i < p->gang_size; ++i)
drm_sched_job_arm(&p->jobs[i]->base);
- for (i = 0; i < (p->gang_size - 1); ++i) {
+ for (i = 0; i < p->gang_size; ++i) {
struct dma_fence *fence;
+ if (p->jobs[i] == leader)
+ continue;
+
fence = &p->jobs[i]->base.s_fence->scheduled;
r = amdgpu_sync_fence(&leader->sync, fence);
if (r)
@@ -1276,7 +1284,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
list_for_each_entry(e, &p->validated, tv.head) {
/* Everybody except for the gang leader uses READ */
- for (i = 0; i < (p->gang_size - 1); ++i) {
+ for (i = 0; i < p->gang_size; ++i) {
+ if (p->jobs[i] == leader)
+ continue;
+
dma_resv_add_fence(e->tv.bo->base.resv,
&p->jobs[i]->base.s_fence->finished,
DMA_RESV_USAGE_READ);
@@ -1286,7 +1297,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
e->tv.num_shared = 0;
}
- seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
+ seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
p->fence);
amdgpu_cs_post_dependencies(p);
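
The amdgpu_cs.c hunks above drop the assumption that the gang leader is always the last job (index gang_size - 1) and instead record gang_leader_idx, with the submission loops now walking every job and skipping the leader by identity. A minimal userspace C sketch of that loop pattern, using a hypothetical struct job in place of amdgpu_job:

#include <stdio.h>

struct job { int id; };

/* Hypothetical stand-ins for p->jobs[] and p->gang_leader; the point is
 * the loop shape: visit every entry, skip the leader by pointer compare. */
static void sync_followers_to_leader(struct job **jobs, unsigned int gang_size,
				     struct job *leader)
{
	unsigned int i;

	for (i = 0; i < gang_size; ++i) {
		if (jobs[i] == leader)
			continue;
		printf("sync job %d against leader %d\n", jobs[i]->id, leader->id);
	}
}

int main(void)
{
	struct job a = { 0 }, b = { 1 }, c = { 2 };
	struct job *jobs[] = { &a, &b, &c };

	/* The leader may now sit at any index, e.g. the first ring that got an IB. */
	sync_followers_to_leader(jobs, 3, &b);
	return 0;
}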
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index cbaa19b2b8a3..f80adf9069ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -54,6 +54,7 @@ struct amdgpu_cs_parser {
/* scheduler job objects */
unsigned int gang_size;
+ unsigned int gang_leader_idx;
struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];
struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];
struct amdgpu_job *gang_leader;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 64510898eedd..f1e9663b4051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -6044,3 +6044,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
dma_fence_put(old);
return NULL;
}
+
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ case CHIP_TOPAZ:
+ /* chips with no display hardware */
+ return false;
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ /* chips with display hardware */
+ return true;
+ default:
+ /* IP discovery */
+ if (!adev->ip_versions[DCE_HWIP][0] ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
+ return false;
+ return true;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 34233a74248c..b06cb0bb166c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -656,7 +656,7 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev) ||
- !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
+ !amdgpu_device_has_display_hardware(adev)) {
size = 0;
} else {
size = amdgpu_gmc_get_vbios_fb_size(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 21d822b1d589..88f9b327183a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 509739d83b5a..3e1ecca72430 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -147,6 +147,14 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
+/*
+ * DMUB Async to Sync Mechanism Status
+ */
+#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
+#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
+#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
+#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
+
/**
* DOC: overview
*
@@ -1637,12 +1645,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
}
- if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
- goto error;
- }
-
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
* It is expected that DMUB will resend any pending notifications at this point, for
* example HPD from DPIA.
@@ -1650,6 +1652,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc))
dc_enable_dmub_outbox(adev->dm.dc);
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -10109,6 +10117,8 @@ static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
*operation_result = AUX_RET_ERROR_TIMEOUT;
} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
} else {
*operation_result = AUX_RET_ERROR_UNKNOWN;
}
@@ -10156,6 +10166,16 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
+
+ if (payload->length != adev->dm.dmub_notify->aux_reply.length) {
+ DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n",
+ payload->address, payload->length,
+ adev->dm.dmub_notify->aux_reply.length);
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx,
+ DMUB_ASYNC_TO_SYNC_ACCESS_INVALID,
+ (uint32_t *)operation_result);
+ }
+
memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
adev->dm.dmub_notify->aux_reply.length);
}
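
The amdgpu_dm.c hunk above is the DPIA AUX fix from the shortlog: a reply whose length does not match the request is rejected before the memcpy instead of being copied blindly. A small self-contained C sketch of that guard, with hypothetical payload/reply structs standing in for the DC types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct aux_payload { uint32_t address; uint8_t length; uint8_t data[16]; };
struct aux_reply   { uint8_t length; uint8_t data[16]; };

/* Returns 0 on success, -1 when the reply length is inconsistent. */
static int copy_aux_reply(struct aux_payload *payload, const struct aux_reply *reply)
{
	if (payload->length != reply->length) {
		fprintf(stderr, "invalid read from DPIA AUX %x(%d) got length %d\n",
			(unsigned int)payload->address, payload->length, reply->length);
		return -1;
	}
	memcpy(payload->data, reply->data, reply->length);
	return 0;
}

int main(void)
{
	struct aux_payload p = { .address = 0x100, .length = 4 };
	struct aux_reply good = { .length = 4, .data = { 1, 2, 3, 4 } };
	struct aux_reply bad  = { .length = 9 };

	printf("good reply: %d\n", copy_aux_reply(&p, &good));
	printf("bad reply:  %d\n", copy_aux_reply(&p, &bad));
	return 0;
}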
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b5ce15c43bcc..635c398fcefe 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -51,12 +51,6 @@
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
/*
- * DMUB Async to Sync Mechanism Status
- */
-#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
-#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
-#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
-/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 594fe8a4d02b..64dd02970292 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -412,7 +412,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
-
+ bool is_dcn;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -450,8 +450,14 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
acrtc->otg_inst = -1;
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
+
+ /* Don't enable DRM CRTC degamma property for DCE since it doesn't
+ * support programmable degamma anywhere.
+ */
+ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
+ drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
+
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
return 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ee0456b5e14e..e0c8d6f09bb4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2393,6 +2393,26 @@ static enum bp_result get_vram_info_v25(
return result;
}
+static enum bp_result get_vram_info_v30(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v3_0 *info_v30;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0,
+ DATA_TABLES(vram_info));
+
+ if (info_v30 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v30->channel_num;
+ info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
+
+ return result;
+}
+
+
/*
* get_integrated_info_v11
*
@@ -3060,6 +3080,16 @@ static enum bp_result bios_parser_get_vram_info(
}
break;
+ case 3:
+ switch (revision.minor) {
+ case 0:
+ result = get_vram_info_v30(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
default:
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
index 84e1486f3d51..39a57bcd7866 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -87,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 47eb162f1a75..7dd36e402bac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -237,7 +237,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
- .configure_crc = optc2_configure_crc,
+ .configure_crc = optc1_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index cf5bd9713f54..ac41a763bb1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -283,8 +283,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
using the max for calculation */
if (hubp->curs_attr.width > 0) {
- // Round cursor width to next multiple of 64
- cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+ cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
switch (pipe->stream->cursor_attributes.color_format) {
case CURSOR_MODE_MONO:
@@ -309,9 +308,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
cursor_size > 16384) {
/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
*/
- cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
- DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+ cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
+ DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /
+ dc->caps.cache_line_size + 2;
}
break;
}
@@ -727,10 +726,7 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- //Round cursor width up to next multiple of 64
- int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
- int cursor_height = hubp->curs_attr.height;
- int cursor_size = cursor_width * cursor_height;
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
switch (hubp->curs_attr.color_format) {
case CURSOR_MODE_MONO:
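
The dcn32_hwseq.c hunks above size the cursor from curs_attr.pitch instead of a width rounded up to 64, then round the byte count up to whole MALL memory blocks before converting it to cache lines. A hedged arithmetic sketch of that rounding, with illustrative constants in place of DCN3_2_MALL_MBLK_SIZE_BYTES and dc->caps.cache_line_size:

#include <stdio.h>

/* Illustrative values only; the real block and cache-line sizes come from
 * the DCN3.2 defines and dc caps, not from these constants. */
#define MBLK_BYTES	65536u
#define CACHE_LINE	64u

static unsigned int cursor_cache_lines(unsigned int pitch, unsigned int height,
				       unsigned int bytes_per_pixel)
{
	unsigned int cursor_size = pitch * height * bytes_per_pixel;
	/* Round up to whole memory blocks, convert to cache lines, and add
	 * 2 lines of slack as the DCN3.2 sequencer does. */
	unsigned int rounded = ((cursor_size + MBLK_BYTES - 1) / MBLK_BYTES) * MBLK_BYTES;

	return rounded / CACHE_LINE + 2;
}

int main(void)
{
	/* 256x256 ARGB cursor: 256 * 256 * 4 bytes = 4 memory blocks. */
	printf("cache lines: %u\n", cursor_cache_lines(256, 256, 4));
	return 0;
}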
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 659323ebd79d..12e17bcfca3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1803,6 +1803,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
+ * prefetch is scheduled correctly to account for dummy pstate.
+ */
+ if (dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
@@ -1990,6 +1996,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+
dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
if (!pstate_en)
@@ -1997,8 +2007,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+ if (dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
+ }
}
static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 244fd15d24b4..9afd9ba23fb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -718,6 +718,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
do {
MaxTotalRDBandwidth = 0;
+ DestinationLineTimesForPrefetchLessThan2 = false;
+ VRatioPrefetchMoreThanMax = false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, mode_lib->vba.VStartupLines);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index f82e14cd9d8a..c8b28c83ddf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -46,6 +46,8 @@
// Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE__ 4.0
+#define __DML_VBA_MAX_DST_Y_PRE__ 63.75
+
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 635fc54338fa..debe46b24a3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3475,7 +3475,6 @@ bool dml32_CalculatePrefetchSchedule(
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
- double TPreMargin = 0;
if (v->GPUVMEnable == true && v->HostVMEnable == true)
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
@@ -3669,6 +3668,7 @@ bool dml32_CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, __DML_VBA_MAX_DST_Y_PRE__);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
@@ -3701,8 +3701,6 @@ bool dml32_CalculatePrefetchSchedule(
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
-
- TPreMargin = Tpre_rounded - TPreReq;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
@@ -3730,7 +3728,8 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (dst_y_prefetch_equ > 1 && TPreMargin > 0.0) {
+ if (dst_y_prefetch_equ > 1 &&
+ (Tpre_rounded >= TPreReq || dst_y_prefetch_equ == __DML_VBA_MAX_DST_Y_PRE__)) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
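
The display_mode_vba_util_32.c and display_mode_vba_32.h hunks above cap dst_y_prefetch_equ at __DML_VBA_MAX_DST_Y_PRE__ (63.75) and treat hitting that cap as an acceptable schedule, presumably because larger values cannot be represented downstream. A small C sketch of the clamp followed by the quarter-line rounding already present in the code:

#include <math.h>
#include <stdio.h>

/* Mirrors __DML_VBA_MAX_DST_Y_PRE__ from display_mode_vba_32.h. */
#define MAX_DST_Y_PRE 63.75

static double clamp_and_round_dst_y_prefetch(double dst_y_prefetch_equ)
{
	/* Clamp first, then round to quarter lines as
	 * dml32_CalculatePrefetchSchedule() does; 63.75 survives the rounding. */
	if (dst_y_prefetch_equ > MAX_DST_Y_PRE)
		dst_y_prefetch_equ = MAX_DST_Y_PRE;
	return floor(4.0 * (dst_y_prefetch_equ + 0.125)) / 4.0;
}

int main(void)
{
	printf("%.2f -> %.2f\n", 70.0, clamp_and_round_dst_y_prefetch(70.0));
	printf("%.2f -> %.2f\n", 12.3, clamp_and_round_dst_y_prefetch(12.3));
	return 0;
}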
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 4fe75dd2b329..b880f4d7d67e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1156,22 +1156,21 @@ static int smu_smc_hw_setup(struct smu_context *smu)
uint64_t features_supported;
int ret = 0;
- if (adev->in_suspend && smu_is_dpm_running(smu)) {
- dev_info(adev->dev, "dpm has been enabled\n");
- /* this is needed specifically */
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 11):
- case IP_VERSION(11, 5, 0):
- case IP_VERSION(11, 0, 12):
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
+ if (adev->in_suspend && smu_is_dpm_running(smu)) {
+ dev_info(adev->dev, "dpm has been enabled\n");
ret = smu_system_features_control(smu, true);
if (ret)
dev_err(adev->dev, "Failed system features control!\n");
- break;
- default:
- break;
+ return ret;
}
- return ret;
+ break;
+ default:
+ break;
}
ret = smu_init_display_count(smu, 0);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index e2fa3b066b96..f816b1dd110e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1388,6 +1388,14 @@ enum smu_cmn2asic_mapping_type {
CMN2ASIC_MAPPING_WORKLOAD,
};
+enum smu_baco_seq {
+ BACO_SEQ_BACO = 0,
+ BACO_SEQ_MSR,
+ BACO_SEQ_BAMACO,
+ BACO_SEQ_ULPS,
+ BACO_SEQ_COUNT,
+};
+
#define MSG_MAP(msg, index, valid_in_vf) \
[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index a9215494dcdd..d466db6f0ad4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -147,14 +147,6 @@ struct smu_11_5_power_context {
uint32_t max_fast_ppt_limit;
};
-enum smu_v11_0_baco_seq {
- BACO_SEQ_BACO = 0,
- BACO_SEQ_MSR,
- BACO_SEQ_BAMACO,
- BACO_SEQ_ULPS,
- BACO_SEQ_COUNT,
-};
-
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
int smu_v11_0_init_microcode(struct smu_context *smu);
@@ -257,7 +249,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu);
int smu_v11_0_baco_exit(struct smu_context *smu);
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_v11_0_baco_seq baco_seq);
+ enum smu_baco_seq baco_seq);
int smu_v11_0_mode1_reset(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 80fb583b18d9..b7f4569aff2a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -124,14 +124,6 @@ struct smu_13_0_power_context {
enum smu_13_0_power_state power_state;
};
-enum smu_v13_0_baco_seq {
- BACO_SEQ_BACO = 0,
- BACO_SEQ_MSR,
- BACO_SEQ_BAMACO,
- BACO_SEQ_ULPS,
- BACO_SEQ_COUNT,
-};
-
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
int smu_v13_0_init_microcode(struct smu_context *smu);
@@ -218,6 +210,9 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq);
+
bool smu_v13_0_baco_is_support(struct smu_context *smu);
enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 6212fd270857..697e98a0a20a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -379,6 +379,10 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
((adev->pdev->device == 0x73BF) &&
(adev->pdev->revision == 0xCF)) ||
((adev->pdev->device == 0x7422) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73A3) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73E3) &&
(adev->pdev->revision == 0x00)))
smu_baco->platform_support = false;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index dccbd9f70723..70b560737687 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1576,7 +1576,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
}
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_v11_0_baco_seq baco_seq)
+ enum smu_baco_seq baco_seq)
{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 43fb102a65f5..89f0f6eb19f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2230,6 +2230,15 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
return ret;
}
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+}
+
bool smu_v13_0_baco_is_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 29529328152d..f0121d171630 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1566,6 +1567,31 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
NULL);
}
+static int smu_v13_0_0_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_0_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for PMFW handling for the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v13_0_baco_exit(smu);
+ }
+}
+
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1827,8 +1853,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.baco_is_support = smu_v13_0_baco_is_support,
.baco_get_state = smu_v13_0_baco_get_state,
.baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_baco_enter,
- .baco_exit = smu_v13_0_baco_exit,
+ .baco_enter = smu_v13_0_0_baco_enter,
+ .baco_exit = smu_v13_0_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_0_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c4102cfb734c..d74debc584f8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -122,6 +122,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1578,6 +1579,31 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_7_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_7_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for PMFW handling for the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v13_0_baco_exit(smu);
+ }
+}
+
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1655,8 +1681,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_is_support = smu_v13_0_baco_is_support,
.baco_get_state = smu_v13_0_baco_get_state,
.baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_baco_enter,
- .baco_exit = smu_v13_0_baco_exit,
+ .baco_enter = smu_v13_0_7_baco_enter,
+ .baco_exit = smu_v13_0_7_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index 3ea53bb67d3b..bd61e20770a5 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -63,23 +63,45 @@
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size)
{
+ u8 zero = 0;
+ char *tmpbuf = NULL;
+ /*
+ * As sub-addressing is not supported by all adaptors,
+ * always explicitly read from the start and discard
+ * any bytes that come before the requested offset.
+ * This way, no matter whether the adaptor supports it
+ * or not, we'll end up reading the proper data.
+ */
struct i2c_msg msgs[] = {
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
- .buf = &offset,
+ .buf = &zero,
},
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = I2C_M_RD,
- .len = size,
+ .len = size + offset,
.buf = buffer,
},
};
int ret;
+ if (offset) {
+ tmpbuf = kmalloc(size + offset, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ msgs[1].buf = tmpbuf;
+ }
+
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (tmpbuf)
+ memcpy(buffer, tmpbuf + offset, size);
+
+ kfree(tmpbuf);
+
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
@@ -208,18 +230,6 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
- /*
- * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
- * the offset but ignore it, and instead they just always return
- * data from the start of the HDMI ID buffer. So for a broken
- * type 1 HDMI adaptor a single byte read will always give us
- * 0x44, and for a type 1 DVI adaptor it should give 0x00
- * (assuming it implements any registers). Fortunately neither
- * of those values will match the type 2 signature of the
- * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
- * the type 2 adaptor detection safely even in the presence
- * of broken type 1 adaptors.
- */
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret);
@@ -233,11 +243,10 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
/*
- * If neither a proper type 1 ID nor a broken type 1 adaptor
- * as described above, assume type 1, but let the user know
- * that we may have misdetected the type.
+ * If not a proper type 1 ID, still assume type 1, but let
+ * the user know that we may have misdetected the type.
*/
- if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ if (!is_type1_adaptor(adaptor_id))
drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id);
}
@@ -343,10 +352,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
* @enable: enable (as opposed to disable) the TMDS output buffers
*
* Set the state of the TMDS output buffers in the adaptor. For
- * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
- * some type 1 adaptors have problems with registers (see comments
- * in drm_dp_dual_mode_detect()) we avoid touching the register,
- * making this function a no-op on type 1 adaptors.
+ * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register.
+ * Type1 adaptors do not support any register writes.
*
* Returns:
* 0 on success, negative error code on failure
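
The drm_dp_dual_mode_helper.c change above stops relying on I2C sub-addressing: it always reads from offset 0, over-reads by offset bytes, and copies only the requested window out of a temporary buffer. A minimal userspace C sketch of that discard pattern, with an in-memory register array standing in for the adaptor (the real code issues the two i2c_msg transfers shown above):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fake register file of an adaptor that ignores the sub-address and
 * always returns data starting at register 0. */
static const unsigned char regs[] = "0123456789ABCDEF";

static int dual_mode_read(unsigned char offset, void *buffer, size_t size)
{
	unsigned char *tmp = malloc(size + offset);

	if (!tmp)
		return -1;
	/* Always read from the start, then keep only the requested window. */
	memcpy(tmp, regs, size + offset);
	memcpy(buffer, tmp + offset, size);
	free(tmp);
	return 0;
}

int main(void)
{
	char id[5] = { 0 };

	if (dual_mode_read(8, id, 4) == 0)
		printf("bytes at offset 8: %s\n", id);	/* prints "89AB" */
	return 0;
}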
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8214a0b1ab7f..203bf8d6c34c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -615,7 +615,7 @@ static int drm_dev_init(struct drm_device *dev,
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
- ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+ ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 7bb98e6a446d..5ea5e260118c 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -104,7 +104,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank)
static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank)
{
- kthread_destroy_worker(vblank->worker);
+ if (vblank->worker)
+ kthread_destroy_worker(vblank->worker);
}
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 939d621c9ad4..688c8afe0bf1 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -151,9 +151,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
- continue;
-
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index a4aa9500fa17..3d4305eea1aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1013,9 +1013,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- if (i915_ttm_cpu_maps_iomem(bo->resource))
- wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
-
if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1042,6 +1039,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
}
}
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (drm_dev_enter(dev, &idx)) {
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT);
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 011be7ff51e1..bc8fb4e38d0a 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -112,11 +112,6 @@ int lima_devfreq_init(struct lima_device *ldev)
unsigned long cur_freq;
int ret;
const char *regulator_names[] = { "mali", NULL };
- const char *clk_names[] = { "core", NULL };
- struct dev_pm_opp_config config = {
- .regulator_names = regulator_names,
- .clk_names = clk_names,
- };
if (!device_property_present(dev, "operating-points-v2"))
/* Optional, continue without devfreq */
@@ -124,7 +119,15 @@ int lima_devfreq_init(struct lima_device *ldev)
spin_lock_init(&ldevfreq->lock);
- ret = devm_pm_opp_set_config(dev, &config);
+ /*
+ * clkname is set separately so it is not affected by the optional
+ * regulator setting which may return error.
+ */
+ ret = devm_pm_opp_set_clkname(dev, "core");
+ if (ret)
+ return ret;
+
+ ret = devm_pm_opp_set_regulators(dev, regulator_names);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 2944228a8e2c..8a3b685c2fcc 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2500,6 +2500,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = {
static const struct panel_desc logictechno_lt161010_2nh = {
.timings = &logictechno_lt161010_2nh_timing,
.num_timings = 1,
+ .bpc = 6,
.size = {
.width = 154,
.height = 86,
@@ -2529,6 +2530,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = {
static const struct panel_desc logictechno_lt170410_2whc = {
.timings = &logictechno_lt170410_2whc_timing,
.num_timings = 1,
+ .bpc = 8,
.size = {
.width = 217,
.height = 136,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6748ec1e0005..a1f909dac89a 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1093,6 +1093,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
struct iommu_domain *domain;
+ /* Our IOMMU usage policy doesn't currently play well with GART */
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ return false;
+
/*
* If the Tegra DRM clients are backed by an IOMMU, push buffers are
* likely to be allocated beyond the 32-bit boundary if sufficient
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4419e810103d..0a6347c05df4 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -197,8 +197,8 @@ vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
- if (IS_ERR(priv_state))
- return ERR_CAST(priv_state);
+ if (!priv_state)
+ return ERR_PTR(-EINVAL);
return to_vc4_hvs_state(priv_state);
}
@@ -210,8 +210,8 @@ vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
- if (IS_ERR(priv_state))
- return ERR_CAST(priv_state);
+ if (!priv_state)
+ return ERR_PTR(-EINVAL);
return to_vc4_hvs_state(priv_state);
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 0cd3f97e7e49..f60ea24db0ec 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -292,6 +292,10 @@ static void host1x_setup_virtualization_tables(struct host1x *host)
static bool host1x_wants_iommu(struct host1x *host1x)
{
+ /* Our IOMMU usage policy doesn't currently play well with GART */
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ return false;
+
/*
* If we support addressing a maximum of 32 bits of physical memory
* and if the host1x firewall is enabled, there's no need to enable