author		Dave Airlie	2020-02-20 11:01:28 +1000
committer	Dave Airlie	2020-02-20 11:01:33 +1000
commit		ec0bd60a4790066bb4426f87a878816392552257 (patch)
tree		ee879f19234838b487d0cd7cc546663c8c477eb9 /drivers/gpu/drm
parent		99edb18b86d9c37c98f182d45c0ae79070e70ac0 (diff)
parent		8fc7036ee652207ca992fbb9abb64090c355a9e0 (diff)
Merge tag 'drm-msm-fixes-2020-02-16' of https://gitlab.freedesktop.org/drm/msm into drm-fixes
+ fix UBWC on GPU and display side for sc7180
+ fix DSI suspend/resume issue encountered on sc7180
+ fix some breakage on so-called "linux-android" devices
(fallout from sc7180/a618 support, not seen earlier
due to bootloader/firmware differences)
+ couple other misc fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/<CAF6AEGshz5K3tJd=NsBSHq6HGT-ZRa67qt+iN=U2ZFO2oD8kuw@mail.gmail.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_gmu.c		37
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_gpu.c		65
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_hfi.c		85
-rw-r--r--	drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c	 4
-rw-r--r--	drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c	58
-rw-r--r--	drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c	 4
-rw-r--r--	drivers/gpu/drm/msm/dsi/dsi_manager.c		 7
-rw-r--r--	drivers/gpu/drm/msm/dsi/phy/dsi_phy.c		 4
-rw-r--r--	drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c	 6
9 files changed, 170 insertions(+), 100 deletions(-)
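Several of the hunks below drain bus traffic by writing a halt bit and then polling the acknowledge register through the driver's spin_until() macro, whose definition is not part of this diff. A minimal sketch of its assumed shape, a busy-wait bounded by the adreno idle timeout that evaluates to 0 on success and -ETIMEDOUT otherwise (the ADRENO_IDLE_TIMEOUT name and the exact expansion are assumptions here):

	/*
	 * Assumed shape of spin_until() (not defined in this diff): re-evaluate
	 * condition X until it holds or ADRENO_IDLE_TIMEOUT jiffies elapse.
	 * Needs <linux/jiffies.h> for jiffies and time_before().
	 */
	#define spin_until(X) ({ \
		int __ret = -ETIMEDOUT; \
		unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
		do { \
			if (X) { \
				__ret = 0; \
				break; \
			} \
		} while (time_before(jiffies, __t)); \
		__ret; \
	})

The halt paths below use it as a statement and discard the result, so a timed-out drain logs nothing and shutdown simply proceeds.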
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 983afeaee737..748cd379065f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 	return true;
 }
 
+#define GBIF_CLIENT_HALT_MASK	BIT(0)
+#define GBIF_ARB_HALT_MASK	BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+								0xf) == 0xf);
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+		return;
+	}
+
+	/* Halt new client requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+	/* Halt all AXI requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+	/* The GBIF halt needs to be explicitly cleared */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
 /* Gracefully try to shut down the GMU and by extension the GPU */
 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
-	struct msm_gpu *gpu = &adreno_gpu->base;
 	u32 val;
 
 	/*
@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 		return;
 	}
 
-	/* Clear the VBIF pipe before shutting down */
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-	spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
-								== 0xf);
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+	a6xx_bus_clear_pending_transactions(adreno_gpu);
 
 	/* tell the GMU we want to slumber */
 	a6xx_gmu_notify_slumber(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index daf07800cde0..68af24150de5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	int ret;
 
-	/*
-	 * During a previous slumber, GBIF halt is asserted to ensure
-	 * no further transaction can go through GPU before GPU
-	 * headswitch is turned off.
-	 *
-	 * This halt is deasserted once headswitch goes off but
-	 * incase headswitch doesn't goes off clear GBIF halt
-	 * here to ensure GPU wake-up doesn't fail because of
-	 * halted GPU transactions.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-
 	/* Make sure the GMU keeps the GPU on while we set it up */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
 
@@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Select CP0 to always count cycles */
 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
 
-	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+	if (adreno_is_a630(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+	}
 
 	/* Enable fault detection */
 	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
-#define GBIF_CLIENT_HALT_MASK	BIT(0)
-#define GBIF_ARB_HALT_MASK	BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if(!a6xx_has_gbif(adreno_gpu)){
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
-								0xf) == 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
-		return;
-	}
-
-	/* Halt new client requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
-	/* Halt all AXI requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
-	/*
-	 * GMU needs DDR access in slumber path. Deassert GBIF halt now
-	 * to allow for GMU to access system memory.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
 	devfreq_suspend_device(gpu->devfreq.devfreq);
 
-	/*
-	 * Make sure the GMU is idle before continuing (because some transitions
-	 * may use VBIF
-	 */
-	a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
-	/* Clear the VBIF pipe before shutting down */
-	/* FIXME: This accesses the GPU - do we need to make sure it is on? */
-	a6xx_bus_clear_pending_transactions(adreno_gpu);
-
 	return a6xx_gmu_stop(a6xx_gpu);
 }
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index eda11abc5f01..e450e0b97211 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -7,6 +7,7 @@
 
 #include "a6xx_gmu.h"
 #include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
 
 #define HFI_MSG_ID(val) [val] = #val
 
@@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
 		NULL, 0);
 }
 
-static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 {
-	struct a6xx_hfi_msg_bw_table msg = { 0 };
+	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
+
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x01;
+
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5003c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
+
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
 
 	/*
-	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it
-	 * does need at least one entry in the list because it might be accessed
-	 * when the GMU is shutting down. Send a single "off" entry.
+	 * These are the CX (CNOC) votes - these are used by the GMU but the
+	 * votes are known and fixed for the target
 	 */
+	msg->cnoc_cmds_num = 1;
+	msg->cnoc_wait_bitmask = 0x01;
+
+	msg->cnoc_cmds_addrs[0] = 0x5007c;
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
 
-	msg.bw_level_num = 1;
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
 
-	msg.ddr_cmds_num = 3;
-	msg.ddr_wait_bitmask = 0x07;
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x07;
 
-	msg.ddr_cmds_addrs[0] = 0x50000;
-	msg.ddr_cmds_addrs[1] = 0x5005c;
-	msg.ddr_cmds_addrs[2] = 0x5000c;
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5005c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
 
-	msg.ddr_cmds_data[0][0] = 0x40000000;
-	msg.ddr_cmds_data[0][1] = 0x40000000;
-	msg.ddr_cmds_data[0][2] = 0x40000000;
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
 
 	/*
 	 * These are the CX (CNOC) votes.  This is used but the values for the
 	 * sdm845 GMU are known and fixed so we can hard code them.
 	 */
-	msg.cnoc_cmds_num = 3;
-	msg.cnoc_wait_bitmask = 0x05;
+	msg->cnoc_cmds_num = 3;
+	msg->cnoc_wait_bitmask = 0x05;
 
-	msg.cnoc_cmds_addrs[0] = 0x50034;
-	msg.cnoc_cmds_addrs[1] = 0x5007c;
-	msg.cnoc_cmds_addrs[2] = 0x5004c;
+	msg->cnoc_cmds_addrs[0] = 0x50034;
+	msg->cnoc_cmds_addrs[1] = 0x5007c;
+	msg->cnoc_cmds_addrs[2] = 0x5004c;
 
-	msg.cnoc_cmds_data[0][0] = 0x40000000;
-	msg.cnoc_cmds_data[0][1] = 0x00000000;
-	msg.cnoc_cmds_data[0][2] = 0x40000000;
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[0][1] = 0x00000000;
+	msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+	msg->cnoc_cmds_data[1][1] = 0x20000001;
+	msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+	struct a6xx_hfi_msg_bw_table msg = { 0 };
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 
-	msg.cnoc_cmds_data[1][0] = 0x60000001;
-	msg.cnoc_cmds_data[1][1] = 0x20000001;
-	msg.cnoc_cmds_data[1][2] = 0x60000001;
+	if (adreno_is_a618(adreno_gpu))
+		a618_build_bw_table(&msg);
+	else
+		a6xx_build_bw_table(&msg);
 
 	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
 		NULL, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 528632690f1e..a05282dede91 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
 
 	INTERLEAVED_RGB_FMT(RGB565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 29705e773a4b..80d3cfc14007 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -12,6 +12,7 @@
 
 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
 
+#define HW_REV				0x0
 #define HW_INTR_STATUS			0x0010
 
 /* Max BW defined in KBps */
@@ -22,6 +23,17 @@ struct dpu_irq_controller {
 	struct irq_domain *domain;
 };
 
+struct dpu_hw_cfg {
+	u32 val;
+	u32 offset;
+};
+
+struct dpu_mdss_hw_init_handler {
+	u32 hw_rev;
+	u32 hw_reg_count;
+	struct dpu_hw_cfg* hw_cfg;
+};
+
 struct dpu_mdss {
 	struct msm_mdss base;
 	void __iomem *mmio;
@@ -32,6 +44,44 @@ struct dpu_mdss {
 	u32 num_paths;
 };
 
+static struct dpu_hw_cfg hw_cfg[] = {
+    {
+	/* UBWC global settings */
+	.val = 0x1E,
+	.offset = 0x144,
+    }
+};
+
+static struct dpu_mdss_hw_init_handler cfg_handler[] = {
+    { .hw_rev = DPU_HW_VER_620,
+      .hw_reg_count = ARRAY_SIZE(hw_cfg),
+      .hw_cfg = hw_cfg
+    },
+};
+
+static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
+{
+	int i;
+	u32 count = 0;
+	struct dpu_hw_cfg *hw_cfg = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev) {
+			hw_cfg = cfg_handler[i].hw_cfg;
+			count = cfg_handler[i].hw_reg_count;
+			break;
+		}
+	}
+
+	for (i = 0; i < count; i++ ) {
+		writel_relaxed(hw_cfg->val,
+			dpu_mdss->mmio + hw_cfg->offset);
+		hw_cfg++;
+	}
+
+	return;
+}
+
 static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
 						struct dpu_mdss *dpu_mdss)
 {
@@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
 	struct dss_module_power *mp = &dpu_mdss->mp;
 	int ret;
+	u32 mdss_rev;
 
 	dpu_mdss_icc_request_bw(mdss);
 
 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
-	if (ret)
+	if (ret) {
 		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+		return ret;
+	}
+
+	mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
+	dpu_mdss_hw_init(dpu_mdss, mdss_rev);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 05cc04f729d6..e1cc541e0ef2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1109,8 +1109,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
 	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
 					  msecs_to_jiffies(50));
 	if (ret == 0)
-		dev_warn(dev->dev, "pp done time out, lm=%d\n",
-			 mdp5_cstate->pipeline.mixer->lm);
+		dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
+				     mdp5_cstate->pipeline.mixer->lm);
 }
 
 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 104115d112eb..4864b9558f65 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
 	return num;
 }
 
-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
 	int id = dsi_mgr_connector_get_id(connector);
@@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
+	struct msm_dsi_pll *src_pll;
 	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
@@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 							id, ret);
 	}
 
+	/* Save PLL status if it is a clock source */
+	src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+	msm_dsi_pll_save_state(src_pll);
+
 	ret = msm_dsi_host_power_off(host);
 	if (ret)
 		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index b0cfa67d2a57..f509ebd77500 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
 	if (!phy || !phy->cfg->ops.disable)
 		return;
 
-	/* Save PLL status if it is a clock source */
-	if (phy->usecase != MSM_DSI_PHY_SLAVE)
-		msm_dsi_pll_save_state(phy->pll);
-
 	phy->cfg->ops.disable(phy);
 
 	dsi_phy_regulator_disable(phy);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 1c894548dd72..6ac04fc303f5 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
 	if (pll_10nm->slave)
 		dsi_pll_enable_pll_bias(pll_10nm->slave);
 
+	rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+	if (rc) {
+		pr_err("vco_set_rate failed, rc=%d\n", rc);
+		return rc;
+	}
+
 	/* Start PLL */
 	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
 		  0x01);
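A note on the bus-drain helper moved into a6xx_gmu.c above: it branches on a6xx_has_gbif(), which is defined outside this diff. The a630 routes GPU memory traffic through the older VBIF and is drained via the VBIF_XIN registers, while GBIF-based parts such as the a618 need the two-stage halt (client requests first, then AXI arbiter traffic) followed by an explicit clear. A minimal sketch of the predicate, assuming the a630 is the only VBIF-based a6xx this driver handles:

	/*
	 * Sketch (assumption, not part of this diff): treat the a630 as the
	 * VBIF-based exception and every other supported a6xx (e.g. a618)
	 * as having a GBIF.
	 */
	static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
	{
		if (adreno_is_a630(gpu))
			return false;

		return true;
	}

Both the removed a6xx_gpu.c copy and the new a6xx_gmu.c copy deassert the halt before returning; relocating the call ties the drain to GMU shutdown rather than to a6xx_pm_suspend().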
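On the display side, dpu_mdss_enable() now reads the raw HW_REV register and looks it up against DPU_HW_VER_620 in cfg_handler[] before applying the UBWC global setting. The version macro comes from the DPU hardware catalog, not from this diff; a sketch assuming the catalog's usual major/minor/step packing:

	/*
	 * Assumed packing of DPU_HW_VER (not part of this diff). Under this
	 * assumption the raw readl_relaxed(mmio + HW_REV) value on sc7180
	 * compares equal to DPU_HW_VER_620 with no masking required.
	 */
	#define DPU_HW_VER(major, minor, step) \
		((((major) & 0xF) << 28) | (((minor) & 0xFFF) << 16) | \
		 ((step) & 0xFFFF))
	#define DPU_HW_VER_620	DPU_HW_VER(6, 2, 0)	/* sc7180 */

Revisions without a cfg_handler[] entry leave count at 0 in dpu_mdss_hw_init(), so the write loop is skipped and other SoCs are unaffected.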