Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_thermal.c | 2
-rw-r--r--  drivers/acpi/property.c | 11
-rw-r--r--  drivers/android/binder_alloc.c | 31
-rw-r--r--  drivers/ata/libata-eh.c | 1
-rw-r--r--  drivers/block/loop.c | 5
-rw-r--r--  drivers/block/ublk_drv.c | 33
-rw-r--r--  drivers/block/zram/zram_drv.c | 42
-rw-r--r--  drivers/block/zram/zram_drv.h | 1
-rw-r--r--  drivers/cpufreq/cpufreq.c | 2
-rw-r--r--  drivers/firmware/dmi_scan.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v3_0.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c | 150
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c | 78
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 200
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/Makefile | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 342
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 376
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 812
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 106
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 15
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 31
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 14
-rw-r--r--  drivers/gpu/drm/bridge/lvds-codec.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 17
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 4
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 77
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_migrate.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_resource.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_resource.h | 6
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 20
-rw-r--r--  drivers/i2c/busses/i2c-scmi.c | 9
-rw-r--r--  drivers/infiniband/core/umem_dmabuf.c | 8
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 25
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_qp.c | 2
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 34
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 7
-rw-r--r--  drivers/iommu/hyperv-iommu.c | 4
-rw-r--r--  drivers/irqchip/irq-loongarch-cpu.c | 2
-rw-r--r--  drivers/irqchip/irq-loongson-eiointc.c | 13
-rw-r--r--  drivers/irqchip/irq-loongson-liointc.c | 4
-rw-r--r--  drivers/irqchip/irq-loongson-pch-msi.c | 2
-rw-r--r--  drivers/irqchip/irq-loongson-pch-pic.c | 40
-rw-r--r--  drivers/md/md.c | 4
-rw-r--r--  drivers/md/raid10.c | 13
-rw-r--r--  drivers/memory/dfl-emif.c | 62
-rw-r--r--  drivers/memory/of_memory.c | 2
-rw-r--r--  drivers/memory/pl353-smc.c | 1
-rw-r--r--  drivers/mmc/host/Kconfig | 1
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 6
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 6
-rw-r--r--  drivers/mmc/host/pxamci.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-of-dwcmshc.c | 16
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 41
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 53
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 1
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 3
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 558
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 553
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 10
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 42
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 8
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.c | 15
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 59
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 9
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 24
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 18
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 8
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 42
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 62
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 55
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 468
-rw-r--r--  drivers/net/ethernet/mscc/vsc7514_regs.c | 84
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 95
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9
-rw-r--r--  drivers/net/fddi/skfp/h/hwmtm.h | 2
-rw-r--r--  drivers/net/ipa/ipa_mem.c | 2
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 2
-rw-r--r--  drivers/net/ipvlan/ipvtap.c | 4
-rw-r--r--  drivers/net/macsec.c | 11
-rw-r--r--  drivers/net/phy/phy_device.c | 8
-rw-r--r--  drivers/net/usb/r8152.c | 27
-rw-r--r--  drivers/net/virtio_net.c | 46
-rw-r--r--  drivers/nfc/pn533/uart.c | 1
-rw-r--r--  drivers/parisc/ccio-dma.c | 13
-rw-r--r--  drivers/parisc/led.c | 2
-rw-r--r--  drivers/perf/riscv_pmu_legacy.c | 2
-rw-r--r--  drivers/platform/mellanox/mlxbf-tmfifo.c | 1
-rw-r--r--  drivers/platform/x86/serial-multi-instantiate.c | 1
-rw-r--r--  drivers/regulator/core.c | 2
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c | 1
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 3
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 4
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 10
-rw-r--r--  drivers/scsi/scsi_lib.c | 47
-rw-r--r--  drivers/scsi/sd.c | 84
-rw-r--r--  drivers/scsi/sd.h | 5
-rw-r--r--  drivers/scsi/storvsc_drv.c | 2
-rw-r--r--  drivers/soc/amlogic/meson-ee-pwrc.c | 5
-rw-r--r--  drivers/soc/amlogic/meson-gx-pwrc-vpu.c | 5
-rw-r--r--  drivers/soc/renesas/Kconfig | 11
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 14
-rw-r--r--  drivers/spi/spi-meson-spicc.c | 129
-rw-r--r--  drivers/spi/spi.c | 14
-rw-r--r--  drivers/tee/tee_shm.c | 3
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 9
-rw-r--r--  drivers/thermal/thermal_core.c | 1
-rw-r--r--  drivers/ufs/core/ufshcd.c | 9
-rw-r--r--  drivers/ufs/host/ufs-exynos.c | 2
-rw-r--r--  drivers/video/console/sticore.c | 2
-rw-r--r--  drivers/video/fbdev/aty/atyfb_base.c | 2
-rw-r--r--  drivers/video/fbdev/aty/radeon_base.c | 48
-rw-r--r--  drivers/video/fbdev/bw2.c | 2
-rw-r--r--  drivers/video/fbdev/chipsfb.c | 1
-rw-r--r--  drivers/video/fbdev/cirrusfb.c | 2
-rw-r--r--  drivers/video/fbdev/clps711x-fb.c | 2
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 29
-rw-r--r--  drivers/video/fbdev/core/fbsysfs.c | 4
-rw-r--r--  drivers/video/fbdev/cyber2000fb.c | 8
-rw-r--r--  drivers/video/fbdev/ffb.c | 2
-rw-r--r--  drivers/video/fbdev/geode/gx1fb_core.c | 6
-rw-r--r--  drivers/video/fbdev/gxt4500.c | 2
-rw-r--r--  drivers/video/fbdev/i740fb.c | 2
-rw-r--r--  drivers/video/fbdev/imxfb.c | 2
-rw-r--r--  drivers/video/fbdev/matrox/matroxfb_base.c | 6
-rw-r--r--  drivers/video/fbdev/omap/omapfb_main.c | 6
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2
-rw-r--r--  drivers/video/fbdev/pm2fb.c | 5
-rw-r--r--  drivers/video/fbdev/pxa168fb.c | 2
-rw-r--r--  drivers/video/fbdev/pxafb.c | 2
-rw-r--r--  drivers/video/fbdev/s3fb.c | 2
-rw-r--r--  drivers/video/fbdev/simplefb.c | 2
-rw-r--r--  drivers/video/fbdev/sis/sis_main.c | 278
-rw-r--r--  drivers/video/fbdev/sm501fb.c | 2
-rw-r--r--  drivers/video/fbdev/ssd1307fb.c | 2
-rw-r--r--  drivers/video/fbdev/sstfb.c | 2
-rw-r--r--  drivers/video/fbdev/sunxvr1000.c | 2
-rw-r--r--  drivers/video/fbdev/sunxvr2500.c | 2
-rw-r--r--  drivers/video/fbdev/sunxvr500.c | 2
-rw-r--r--  drivers/video/fbdev/tcx.c | 2
-rw-r--r--  drivers/video/fbdev/tdfxfb.c | 4
-rw-r--r--  drivers/video/fbdev/tgafb.c | 2
-rw-r--r--  drivers/video/fbdev/tridentfb.c | 2
-rw-r--r--  drivers/virtio/virtio_mmio.c | 9
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 20
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 3
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 6
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 17
-rw-r--r--  drivers/virtio/virtio_ring.c | 8
-rw-r--r--  drivers/virtio/virtio_vdpa.c | 16
-rw-r--r--  drivers/xen/privcmd.c | 21
-rw-r--r--  drivers/xen/xen-scsiback.c | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 2
325 files changed, 5108 insertions(+), 2552 deletions(-)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index db6ac540e924..e534fd49a67e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -151,7 +151,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->thermal_req);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 7b3ad8ed2f4e..d4c168ce428c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -370,7 +370,7 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
bool ret;
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
return false;
}
@@ -1043,11 +1043,10 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
break; \
} \
if (__items[i].integer.value > _Generic(__val, \
- u8: U8_MAX, \
- u16: U16_MAX, \
- u32: U32_MAX, \
- u64: U64_MAX, \
- default: 0U)) { \
+ u8 *: U8_MAX, \
+ u16 *: U16_MAX, \
+ u32 *: U32_MAX, \
+ u64 *: U64_MAX)) { \
ret = -EOVERFLOW; \
break; \
} \
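The _Generic fix above is easy to misread: in this macro `__val` is a pointer to the destination variable, so the selection has to match pointer types. With the old value-type selectors nothing ever matched, every expansion took the `default: 0U` branch, and the bound check misfired for any nonzero property value; dropping `default` also turns an unsupported type into a compile error. A minimal standalone sketch (plain C11, not the kernel macro) of picking a bound by the pointed-to type:

```c
#include <stdio.h>
#include <stdint.h>

/*
 * _Generic dispatches on the static type of its controlling expression.
 * Because the argument here is a pointer, the selectors must be pointer
 * types; value selectors (uint8_t:, ...) would never match a uint8_t *.
 */
#define TYPE_MAX(p) _Generic((p),	\
	uint8_t *:  UINT8_MAX,		\
	uint16_t *: UINT16_MAX,		\
	uint32_t *: UINT32_MAX,		\
	uint64_t *: UINT64_MAX)

int main(void)
{
	uint8_t b;
	uint64_t q;

	printf("%llu\n", (unsigned long long)TYPE_MAX(&b)); /* 255 */
	printf("%llu\n", (unsigned long long)TYPE_MAX(&q)); /* 18446744073709551615 */
	return 0;
}
```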
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 1014beb12802..51f4e1c5cd01 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -402,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
+ mmap_read_lock(alloc->vma_vm_mm);
if (!binder_alloc_get_vma(alloc)) {
+ mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
+ mmap_read_unlock(alloc->vma_vm_mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -929,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
- }
+
+ mmap_read_lock(alloc->vma_vm_mm);
+ if (binder_alloc_get_vma(alloc) == NULL) {
+ mmap_read_unlock(alloc->vma_vm_mm);
+ goto uninitialized;
}
+
+ mmap_read_unlock(alloc->vma_vm_mm);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+
+uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ef4508d72c02..7c128c89b454 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e3c0ba93c1a3..ad92192c7d61 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -979,6 +979,11 @@ loop_set_status_from_info(struct loop_device *lo,
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
+
+ /* loff_t vars have been assigned __u64 */
+ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+ return -EOVERFLOW;
+
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
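The new check works because `lo_offset` and `lo_sizelimit` arrive from userspace as `__u64` but are stored in signed `loff_t` fields; a value above LLONG_MAX wraps to a negative number on Linux targets, which must not be treated as a valid offset. A hedged userspace sketch of the same rejection:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t lo_offset = UINT64_MAX;	/* e.g. userspace passes (__u64)-1 */
	int64_t offset = (int64_t)lo_offset;	/* loff_t is a signed 64-bit type */

	/*
	 * The out-of-range conversion wraps (implementation-defined, but
	 * two's-complement everywhere Linux runs), giving a negative offset;
	 * the patch reports -EOVERFLOW instead of letting it through.
	 */
	if (offset < 0)
		printf("rejected: %lld would overflow\n", (long long)offset);
	return 0;
}
```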
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2b7d1db5c4a7..6a4a94b4cdf4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -555,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}
-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
return ubq->ubq_daemon->flags & PF_EXITING;
}
@@ -605,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
}
/*
- * __ublk_fail_req() may be called from abort context or ->ubq_daemon
- * context during exiting, so lock is required.
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So the lock is unnecessary.
*
* Also aborting may not be started yet, keep in mind that one failed
* request may be issued by block layer again.
@@ -644,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
struct ublk_device *ub = ubq->dev;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- bool task_exiting = current != ubq->ubq_daemon ||
- (current->flags & PF_EXITING);
+ bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
unsigned int mapped_bytes;
pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -680,6 +680,11 @@ static inline void __ublk_rq_task_work(struct request *req)
* do the copy work.
*/
io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+ __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
}
mapped_bytes = ublk_map_io(ubq, req, io);
@@ -751,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
goto fail;
} else {
- struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ /*
+ * If the check passes, we know that this is a re-issued request aborted
+ * previously in monitor_work because the ubq_daemon(cmd's task) is
+ * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+ * because this ioucmd's io_uring context may be freed now if no inflight
+ * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+ *
+ * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request (releasing
+ * the tag). Then the request is re-started (allocating the tag) and we are here.
+ * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+ * guarantees that this is a re-issued request aborted previously.
+ */
+ if ((io->flags & UBLK_IO_FLAG_ABORTED))
+ goto fail;
+
pdu->req = rq;
io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 92cb929a45b7..226ea76cc819 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1146,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int version = 2;
+ int version = 1;
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu\n",
+ "version: %d\n%8llu %8llu\n",
version,
+ (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1351,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret = 0;
unsigned long alloced_pages;
- unsigned long handle = 0;
+ unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
@@ -1369,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
kunmap_atomic(mem);
+compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,20 +1379,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
+ zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
-
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
-
+ /*
+ * handle allocation has 2 paths:
+ * a) fast path is executed with preemption disabled (for
+ * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+ * since we can't sleep;
+ * b) slow path enables preemption and attempts to allocate
+ * the page with __GFP_DIRECT_RECLAIM bit set. we have to
+ * put per-cpu compression stream and, thus, to re-do
+ * the compression once handle is allocated.
+ *
+ * if we have a 'non-null' handle here then we are coming
+ * from the slow path and handle has already been allocated.
+ */
+ if (IS_ERR((void *)handle))
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR((void *)handle)) {
zcomp_stream_put(zram->comp);
+ atomic64_inc(&zram->stats.writestall);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (!IS_ERR((void *)handle))
+ goto compress_again;
return PTR_ERR((void *)handle);
}
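The restored `compress_again` loop follows the two-path pattern the new comment describes: try a non-sleeping allocation while the per-CPU compression stream is held, and only fall back to a sleeping `GFP_NOIO` allocation after releasing the stream, then redo the compression with the handle already in hand. A standalone sketch of that control flow (all names are stand-ins, not kernel APIs):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static bool alloc_fast(void **h) { *h = NULL; return false; }	/* pretend the atomic path failed */
static bool alloc_slow(void **h) { *h = malloc(16); return *h != NULL; }
static void stream_get(void) { printf("stream_get (no sleeping allowed)\n"); }
static void stream_put(void) { printf("stream_put (may sleep now)\n"); }
static void compress_page(void) { printf("compress page\n"); }

int main(void)
{
	void *handle = NULL;

compress_again:
	stream_get();
	compress_page();
	if (!handle && !alloc_fast(&handle)) {
		/* slow path: drop the stream, allocate with reclaim allowed */
		stream_put();
		if (!alloc_slow(&handle))
			return 1;
		goto compress_again;	/* redo compression, handle pre-allocated */
	}
	printf("store compressed data\n");
	stream_put();
	free(handle);
	return 0;
}
```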
@@ -1948,6 +1969,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 158c91e54850..80c3b43b4828 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -81,6 +81,7 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7820c4e74289..69b3d61852ac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
target_freq = clamp_val(target_freq, policy->min, policy->max);
- if (!cpufreq_driver->target_index)
+ if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f191a1f901ac..0eb6b617f709 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
- dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+ dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
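The replacement is value-for-value equivalent: the SMBIOS 3 entry point stores its 24-bit version (major, minor, docrev) in bytes 7-9, so a big-endian 32-bit read at offset 6 masked to the low 24 bits yields the same bytes as a direct 24-bit read at offset 7; the new form simply stops pulling in the unrelated length byte at offset 6. A quick check (helpers written out here instead of the kernel's get_unaligned_* accessors):

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint32_t be24(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	/* bytes 6..9 of a made-up SMBIOS 3 entry point: length byte,
	 * then version 3.4.0 (major, minor, docrev) */
	const uint8_t buf[10] = { [6] = 0x18, [7] = 3, [8] = 4, [9] = 0 };

	printf("old: %06x\n", (unsigned)(be32(buf + 6) & 0xFFFFFF));	/* 030400 */
	printf("new: %06x\n", (unsigned)be24(buf + 7));			/* 030400 */
	return 0;
}
```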
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index c6cc493a5486..2b97b8a96fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r = 0;
dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -339,10 +331,13 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r;
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e146810c700b..d597e2656c47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -317,7 +317,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3c09dcc0986e..647220a8762d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a699134a1e8c..cbd593f7d553 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -40,10 +40,10 @@
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
- * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
/* Impose limit on how much memory KFD can use */
static struct {
@@ -149,7 +149,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
- vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ vram_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -182,8 +182,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
- if (adev)
+ if (adev) {
adev->kfd.vram_used += vram_needed;
+ adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ }
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
@@ -203,8 +205,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
- if (adev)
- adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ if (adev) {
+ adev->kfd.vram_used -= size;
+ adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
@@ -1608,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;
-
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- - adev->kfd.vram_used
+ - adev->kfd.vram_used_aligned
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
- return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+ return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fd8f3731758e..b81b77a9efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
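Per the fix, the v3.0 vram_info table encodes `channel_width` as a log2 exponent rather than a width in bits, so the bus width is channels × 2^width. A worked example with made-up numbers (8 channels, field value 4):

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical values; the field is a log2 exponent per the fix above */
	unsigned int mem_channel_number = 8, mem_channel_width = 4;

	printf("old: %u bits\n", mem_channel_number * mem_channel_width);	   /* 32, wrong */
	printf("new: %u bits\n", mem_channel_number * (1u << mem_channel_width)); /* 128 */
	return 0;
}
```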
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d8f1335bc68f..b7bae833c804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -837,16 +837,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
}
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e2eec985adb3..cb00c7d6f50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1705,7 +1705,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
- uint32_t *new, *tmp = NULL;
+ uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;
do {
@@ -1747,7 +1747,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
ret = size;
error_free:
- kfree(tmp);
+ if (tmp != new)
+ kfree(tmp);
kfree(new);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a6fe3070b6..f095a2513aff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2456,12 +2456,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!hive->reset_domain ||
!amdgpu_reset_get_reset_domain(hive->reset_domain)) {
r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
goto init_failed;
}
/* Drop the early temporary reset domain we created for device */
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = hive->reset_domain;
+ amdgpu_put_xgmi_hive(hive);
}
}
@@ -4413,8 +4415,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
- amdgpu_amdkfd_pre_reset(adev);
-
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -4742,6 +4742,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);
+
+ reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5071b96be982..b1099ee79c50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -272,10 +272,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
-
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index b067ce45d226..1036446abc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2641,6 +2641,9 @@ static int psp_hw_fini(void *handle)
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
}
psp_asd_terminate(psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 9e55a5d7a825..ffda1560c648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -37,6 +37,7 @@ struct amdgpu_reset_context {
struct amdgpu_device *reset_req_dev;
struct amdgpu_job *job;
struct amdgpu_hive_info *hive;
+ struct list_head *reset_device_list;
unsigned long flags;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b4c19412625..134575a3893c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -637,6 +637,8 @@ struct amdgpu_ttm_tt {
#endif
};
+#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
+
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -648,7 +650,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct ttm_tt *ttm = bo->tbo.ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -702,7 +704,7 @@ out_unlock:
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;
if (!gtt || !gtt->userptr)
@@ -751,7 +753,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -788,7 +790,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -822,7 +824,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -860,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
uint64_t flags;
int r;
@@ -927,7 +929,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -998,7 +1000,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1025,7 +1027,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1079,7 +1081,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
pgoff_t i;
int ret;
@@ -1113,7 +1115,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
pgoff_t i;
@@ -1182,7 +1184,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
- gtt = (void *)bo->ttm;
+ gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1199,7 +1201,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return NULL;
@@ -1218,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end, unsigned long *userptr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long size;
if (gtt == NULL || !gtt->userptr)
@@ -1241,7 +1243,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
*/
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL || !gtt->userptr)
return false;
@@ -1254,7 +1256,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return false;
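All of the `(void *)ttm` casts replaced in this file silently relied on `struct ttm_tt` being the first member of `struct amdgpu_ttm_tt`; the new `ttm_to_amdgpu_ttm_tt()` helper makes the relationship explicit via `container_of()`. A self-contained illustration of the idiom with simplified stand-in structs (the kernel's `container_of()` additionally carries type checking):

```c
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_tt { int page_flags; };

struct amdgpu_ttm_tt {
	struct ttm_tt ttm;	/* embedded member */
	unsigned long userptr;
};

#define ttm_to_amdgpu_ttm_tt(p) container_of(p, struct amdgpu_ttm_tt, ttm)

int main(void)
{
	struct amdgpu_ttm_tt gtt = { .userptr = 0x1000 };
	struct ttm_tt *ttm = &gtt.ttm;

	/*
	 * Recover the containing structure from a pointer to the embedded
	 * member; unlike a bare cast, this stays correct even if "ttm" is
	 * ever moved away from offset zero.
	 */
	printf("userptr = %#lx\n", ttm_to_amdgpu_ttm_tt(ttm)->userptr);
	return 0;
}
```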
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 108e8e8a1a36..576849e95296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1b108d03e785..f2aebbf3fbe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -742,7 +742,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_put_xgmi_hive(hive);
}
- return psp_xgmi_terminate(&adev->psp);
+ return 0;
}
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index 33a8a7365aef..f0e235f98afb 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"
+#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+
+
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+ break;
+ default:
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ }
+ return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+ break;
+ default:
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ }
+}
+
static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
static void
@@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 0, 2):
athub_v3_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
- data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ data = athub_v3_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9c964cd3b5d4..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e0ad9f27dc3f..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 77f5e998a120..b1c44fab074f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 802e5c753271..a22b45c92792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fafbad3cf08d..a2a4dc1844c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4846,7 +4846,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 6fd71cb10e54..f6b1bb40e503 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -53,6 +53,7 @@
#define GFX11_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388
#define regCGTT_WD_CLK_CTRL 0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
@@ -130,6 +131,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
+ bool enable);
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -1138,6 +1141,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
+ .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5181,9 +5185,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
} else {
/* Program RLC_CGCG_CGLS_CTRL */
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5212,9 +5219,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
}
}
@@ -5279,6 +5289,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};
+static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
+
+ /* Program RLC_PG_DELAY_3 for CGPG hysteresis */
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 1):
+ WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ gfx_v11_cntl_power_gating(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
static int gfx_v11_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -5293,6 +5335,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
case IP_VERSION(11, 0, 2):
amdgpu_gfx_off_ctrl(adev, enable);
break;
+ case IP_VERSION(11, 0, 1):
+ gfx_v11_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
default:
break;
}
@@ -5310,6 +5356,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
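The SDMA1 guard added above is duplicated in the gate and ungate paths. On parts with more than two SDMA instances the same idea generalizes to a loop over adev->sdma.num_instances; a minimal sketch, assuming an indexable register table (the real SDMA0/SDMA1 CGCG registers are distinct macros, and the CGCG_INT_ENABLE field is assumed to have the same layout on every instance):

/* Hypothetical sketch: per-instance CGCG interrupt enable/disable.
 * sdma_rlc_cgcg_ctrl_reg[] is an assumed lookup table; the driver
 * actually uses the distinct regSDMA0_/regSDMA1_RLC_CGCG_CTRL macros.
 */
static void sdma_cgcg_int_set(struct amdgpu_device *adev, bool enable)
{
        u32 data;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                data = RREG32(sdma_rlc_cgcg_ctrl_reg[i]);
                if (enable)
                        data |= SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
                else
                        data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
                WREG32(sdma_rlc_cgcg_ctrl_reg[i], data);
        }
}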
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c6e0f9313a7f..fc9c1043244c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9ae8cdaa033e..f513e2c2e964 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 22761a3bb818..4603653916f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_domain->sem);
@@ -1624,12 +1625,15 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size same with Vega20 */
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 39a696cd45b5..29c3484ae1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
0);
}
+static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Force on the MEM clocks before switching the clock/power mode */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* Disable clock and power gating before making any changes */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+ /* ATOMIC/RC_MEM_POWER_CTRL_EN are confirmed to be required for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+ /* Disable the MEM clock override after the clock/power mode change */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_2_update_mem_power_gating(adev, enable);
+ hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
+}
+
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
.flush_hdp = hdp_v5_2_flush_hdp,
+ .update_clock_gating = hdp_v5_2_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
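The enable path in hdp_v5_2_update_mem_power_gating() encodes a strict priority among the mutually exclusive SRAM power modes: shutdown (SD) is preferred over light sleep (LS), which is preferred over deep sleep (DS). Factored out, the selection reduces to the sketch below (the enum and helper are hypothetical; only one mode's EN bits are ever set):

/* Sketch of the mode priority applied above: SD > LS > DS. */
enum hdp_mem_mode { HDP_MEM_NONE, HDP_MEM_SD, HDP_MEM_LS, HDP_MEM_DS };

static enum hdp_mem_mode hdp_pick_mem_mode(u64 cg_flags)
{
        if (cg_flags & AMD_CG_SUPPORT_HDP_SD)
                return HDP_MEM_SD;      /* shutdown wins */
        if (cg_flags & AMD_CG_SUPPORT_HDP_LS)
                return HDP_MEM_LS;      /* then light sleep */
        if (cg_flags & AMD_CG_SUPPORT_HDP_DS)
                return HDP_MEM_DS;      /* deep sleep last */
        return HDP_MEM_NONE;
}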
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 92dc60a9d209..085e613f3646 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -727,6 +727,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
.get_wptr = ih_v6_0_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v6_0_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3f44a099c52a..3e51e773f92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index cac72ced94c8..e8058edc1d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- //TODO
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
}
const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 6e0145b2b408..445cb06b9d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned int num_level, block_size;
uint32_t tmp;
int i;
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4b5396d3e60f..eec13cb5bf75 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * The ih_soft ring doesn't have any backing hardware registers;
+ * update the wptr from the writeback slot and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
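The same two changes repeat for vega10 and vega20 below: get_wptr treats the software ring like ring0 (writeback only, no wptr register to poll), and set_rptr returns early because the soft ring has no doorbell or register behind it. Condensed into a sketch of the contract (not the drivers' exact code, which also runs the overflow check on the writeback value):

/* Sketch of the ih_soft contract shared by the navi10/vega10/vega20 blocks. */
static u32 ih_get_wptr_sketch(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
        if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft)
                return le32_to_cpu(*ih->wptr_cpu) & ih->ptr_mask; /* writeback path */
        /* ... other hardware rings: register reads with overflow handling ... */
        return 0;
}

static void ih_set_rptr_sketch(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
        if (ih == &adev->irq.ih_soft)
                return; /* nothing to write back to hardware */
        /* ... doorbell or register update for hardware rings ... */
}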
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 01e8288d09a8..1dc95ef21da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -247,6 +247,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
}
+static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (enable) {
+ data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ } else {
+ data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
+}
+
+static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (enable)
+ data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+ else
+ data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
+ if (enable) {
+ data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
+}
+
+static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -262,6 +337,9 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index a2588200ea58..0b2ac418e4ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
adev->psp.dtm_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR) {
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 726a5bba40b2..a75a286e1ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/dev_printk.h>
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 52816de5e17b..55284b24f113 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -494,6 +494,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
+static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
+ bool enter)
+{
+ if (enter)
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ else
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (adev->gfx.funcs->update_perfmon_mgcg)
+ adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
+
+ return 0;
+}
+
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
@@ -513,6 +527,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
+ .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
static int soc21_common_early_init(void *handle)
@@ -546,8 +561,10 @@ static int soc21_common_early_init(void *handle)
case IP_VERSION(11, 0, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
+#if 0
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
+#endif
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_FGCG |
@@ -575,7 +592,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_ATHUB_MGCG |
- AMD_CG_SUPPORT_ATHUB_LS;
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags =
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
@@ -586,9 +605,25 @@ static int soc21_common_early_init(void *handle)
break;
case IP_VERSION(11, 0, 1):
adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
+ AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
@@ -683,6 +718,8 @@ static int soc21_common_set_clockgating_state(void *handle,
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ case IP_VERSION(7, 7, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index ca14c3ef742e..fb2d74f30448 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1115,7 +1115,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t tmp;
@@ -1133,7 +1133,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
- return 0;
}
/**
@@ -1154,7 +1153,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_stop_dpg_mode(adev, i);
+ vcn_v4_0_stop_dpg_mode(adev, i);
continue;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cdd599a08125..03b7066471f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -334,9 +334,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * The ih_soft ring doesn't have any backing hardware registers;
+ * update the wptr from the writeback slot and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -409,6 +411,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 3b4eb8285943..2022ffbb8dba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -385,9 +385,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * The ih_soft ring doesn't have any backing hardware registers;
+ * update the wptr from the writeback slot and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -461,6 +463,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 2b3d8bc8f0aa..dc774ddf3445 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -874,7 +874,7 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
err = kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &args->wait_result);
+ &args->timeout, &args->wait_result);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f5853835f03a..22c0929d410b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -102,13 +102,18 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
- case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
break;
+ case IP_VERSION(6, 0, 1):
+ /* Reserve 1 for paging and 1 for gfx */
+ kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
+ /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
+ kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ break;
default:
break;
}
@@ -377,12 +382,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
- gfx_target_version = 100306;
- if (!vf)
- f2g = &gfx_v10_3_kfd2kgd;
- break;
case IP_VERSION(10, 3, 7):
- gfx_target_version = 100307;
+ gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
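The split matters because the reserved-queue bitmap interleaves queue bits across engines: with two engines, reserving queues 0 and 1 on each engine touches bits 0-3 (0xF), while the single-engine SDMA 6.0.1 packs the same two queues into bits 0-1 (0x3). A hypothetical helper that derives the mask from the layout in the comments:

/* Hypothetical: BIT(q * num_engines + e) = engine e, queue q,
 * matching the bit layout described in the comments above.
 */
static u64 reserved_sdma_mask(unsigned int num_engines,
                              unsigned int reserved_per_engine)
{
        u64 mask = 0;
        unsigned int e, q;

        for (q = 0; q < reserved_per_engine; q++)
                for (e = 0; e < num_engines; e++)
                        mask |= 1ULL << (q * num_engines + e);
        return mask;
}

/* reserved_sdma_mask(2, 2) == 0xF, reserved_sdma_mask(1, 2) == 0x3 */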
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3942a56c28bb..83e3ce9f6049 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
return msecs_to_jiffies(user_timeout_ms) + 1;
}
-static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
+ bool undo_auto_reset)
{
uint32_t i;
@@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
+ if (undo_auto_reset && waiters[i].activated &&
+ waiters[i].event && waiters[i].event->auto_reset)
+ set_event(waiters[i].event);
spin_unlock(&waiters[i].event->lock);
}
@@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result)
{
struct kfd_event_data __user *events =
@@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
int ret = 0;
struct kfd_event_waiter *event_waiters = NULL;
- long timeout = user_timeout_to_jiffies(user_timeout_ms);
+ long timeout = user_timeout_to_jiffies(*user_timeout_ms);
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
@@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
}
if (signal_pending(current)) {
- /*
- * This is wrong when a nonzero, non-infinite timeout
- * is specified. We need to use
- * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
- * contains a union with data for each user and it's
- * in generic kernel code that I don't want to
- * touch yet.
- */
ret = -ERESTARTSYS;
+ if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
+ *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
+ *user_timeout_ms = jiffies_to_msecs(
+ max(0l, timeout-1));
break;
}
@@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
event_waiters, events);
out_unlock:
- free_waiters(num_events, event_waiters);
+ free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
mutex_unlock(&p->event_mutex);
out:
if (ret)
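Passing the timeout by pointer lets the -ERESTARTSYS path hand the remaining time back to the restarted ioctl instead of the full original value; the max(0l, timeout - 1) clamp keeps the result non-negative and drops one jiffy so a restart can never wait longer than the unexpired window. A sketch of the arithmetic (assuming HZ = 250, i.e. 4 ms per jiffy):

/* Sketch (not driver code): timeout to resume with after -ERESTARTSYS. */
static uint32_t remaining_timeout_ms(long timeout_jiffies)
{
        if (timeout_jiffies < 1)
                return 0;
        return jiffies_to_msecs(timeout_jiffies - 1);
}
/* With HZ = 250: remaining_timeout_ms(100) == 396, not the original full timeout. */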
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d03a3b9c9c5d..bf610e3b683b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a67ba8879a56..11074cc8c333 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
- svm_bo->svms = prange->svms;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -3273,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
- struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
@@ -3281,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
- /* svm_range_bo_release destroys this worker thread. So during
- * the lifetime of this thread, kfd_process and mm will be valid.
- */
- p = container_of(svm_bo->svms, struct kfd_process, svms);
- mm = p->mm;
- if (!mm)
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ mm = svm_bo->eviction_fence->mm;
+ } else {
+ svm_range_bo_unref(svm_bo);
return;
+ }
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
@@ -3305,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
- r = svm_migrate_vram_to_ram(prange,
- svm_bo->eviction_fence->mm,
+ r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_TTM_EVICTION);
} while (!r && prange->actual_loc && --retries);
@@ -3324,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
}
spin_unlock(&svm_bo->list_lock);
mmap_read_unlock(mm);
+ mmput(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
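Instead of trusting a kfd_process pointer cached in the BO, the eviction worker now pins the address space through the fence's mm: mmget_not_zero() succeeds only while the mm is still alive, and the mmput() after the migration releases the reference. Reduced to its essentials, the pattern is:

/* Sketch of the deferred-work mm pinning used above. */
static void eviction_worker_sketch(struct mm_struct *fence_mm)
{
        struct mm_struct *mm;

        if (!mmget_not_zero(fence_mm))
                return;         /* address space already torn down */
        mm = fence_mm;

        mmap_read_lock(mm);
        /* ... migrate ranges backed by this mm ... */
        mmap_read_unlock(mm);

        mmput(mm);              /* pairs with mmget_not_zero() */
}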
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 9156b041ef17..cfac13ad06ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -46,7 +46,6 @@ struct svm_range_bo {
spinlock_t list_lock;
struct amdgpu_amdkfd_fence *eviction_fence;
struct work_struct eviction_work;
- struct svm_range_list *svms;
uint32_t evicting;
struct work_struct release_work;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 25990bec600d..3f0a4a415907 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
+ struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
struct kfd_iolink_properties *props = NULL, *props2 = NULL;
- struct kfd_iolink_properties *gpu_link, *cpu_link;
struct kfd_topology_device *cpu_dev;
int ret = 0;
int i, num_cpu;
@@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
continue;
/* find CPU <--> CPU links */
+ cpu_link = NULL;
cpu_dev = kfd_topology_device_by_proximity_domain(i);
if (cpu_dev) {
- list_for_each_entry(cpu_link,
+ list_for_each_entry(tmp_link,
&cpu_dev->io_link_props, list) {
- if (cpu_link->node_to == gpu_link->node_to)
+ if (tmp_link->node_to == gpu_link->node_to) {
+ cpu_link = tmp_link;
break;
+ }
}
}
- if (cpu_link->node_to != gpu_link->node_to)
+ if (!cpu_link)
return -ENOMEM;
/* CPU <--> CPU <--> GPU, GPU node*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8660d93cc405..5140d9c2bf3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4135,6 +4138,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
/*
* In this architecture, the association
@@ -4326,6 +4330,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
+ amdgpu_set_panel_orientation(&aconnector->base);
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -6684,6 +6689,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
return;
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!encoder)
return;
@@ -6728,8 +6737,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
-
- amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b841b8b0a9d8..987bde4dca3d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -34,6 +34,7 @@
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
+#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}
-bool modifier_has_dcc(uint64_t modifier)
+static bool modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -660,7 +661,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
}
@@ -1412,7 +1413,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
}
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 95168c2cfa6f..286981a2dd40 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-void get_min_max_dc_plane_scaling(struct drm_device *dev,
- struct drm_framebuffer *fb,
- int *min_downscale, int *max_upscale);
-
int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-bool modifier_has_dcc(uint64_t modifier);
-
-unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);
-
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 6767fab55c26..352e9afb85c6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -100,3 +100,24 @@ void convert_float_matrix(
matrix[i] = (uint16_t)reg_value;
}
}
+
+static uint32_t find_gcd(uint32_t a, uint32_t b)
+{
+ uint32_t remainder = 0;
+ while (b != 0) {
+ remainder = a % b;
+ a = b;
+ b = remainder;
+ }
+ return a;
+}
+
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den)
+{
+ uint32_t gcd = 0;
+
+ gcd = find_gcd(num, den);
+ *out_num = num / gcd;
+ *out_den = den / gcd;
+}
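find_gcd() is the iterative Euclidean algorithm, so reduce_fraction() runs in O(log min(num, den)) divisions. One caveat: num == den == 0 yields a gcd of 0 and would divide by zero, so callers are expected to pass at least one nonzero operand. A short usage sketch:

/* Usage sketch: reduce display timing fractions to lowest terms. */
static void reduce_fraction_example(void)
{
        uint32_t num, den;

        reduce_fraction(1920, 1080, &num, &den);        /* num = 16, den = 9 */
        reduce_fraction(60000, 1001, &num, &den);       /* already coprime: 60000/1001 */
}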
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index ade785c4fdc7..81da4e6f7a1a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -38,6 +38,9 @@ void convert_float_matrix(
struct fixed31_32 *flt,
uint32_t buffer_size);
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c76091fd1f2..f276abb63bcd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
- case AMDGPU_FAMILY_GC_11_0_2: {
+ case AMDGPU_FAMILY_GC_11_0_1: {
struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
@@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
dcn32_clk_mgr_destroy(clk_mgr);
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dcn314_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0202dc682682..ca6dfd2d7561 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -24,10 +24,9 @@
*/
#include "dccg.h"
-#include "clk_mgr_internal.h"
+#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
-#include "rn_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index 2e088c5171b2..f1319957e400 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -28,6 +28,7 @@
#include "clk_mgr.h"
#include "dm_pp_smu.h"
+#include "clk_mgr_internal.h"
extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index ee99974b3b62..beb025cd3dc2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -307,16 +307,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn314_smu_enable_pme_wa(clk_mgr);
}
-void dcn314_init_clocks(struct clk_mgr *clk_mgr)
-{
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
-
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
@@ -425,7 +415,7 @@ static struct wm_table lpddr5_wm_table = {
}
};
-static DpmClocks_t dummy_clocks;
+static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
@@ -510,7 +500,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
- DpmClocks_t *table = smu_dpm_clks->dpm_clks;
+ DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
@@ -527,6 +517,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+
+ default:
+ break;
+ }
+ return 1;
+}
+
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -540,89 +550,127 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
-static unsigned int find_clk_for_voltage(
- const DpmClocks_t *clock_table,
- const uint32_t clocks[],
- unsigned int voltage)
-{
- int i;
- int max_voltage = 0;
- int clock = 0;
-
- for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage) {
- return clocks[i];
- } else if (clock_table->SocVoltage[i] >= max_voltage &&
- clock_table->SocVoltage[i] < voltage) {
- max_voltage = clock_table->SocVoltage[i];
- clock = clocks[i];
- }
- }
-
- ASSERT(clock);
- return clock;
-}
-
static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+ const DpmClocks314_t *clock_table)
{
- int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ int i;
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ /* Find highest valid fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
+ clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
-
- bw_params->clk_table.num_entries = j + 1;
+ /* We expect the table to contain at least one valid fclk entry. */
+ ASSERT(is_valid_clock_value(max_fclk));
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ /* Dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
+ /* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
- switch (clock_table->DfPstateTable[j].WckRatio) {
- case WCK_RATIO_1_2:
- bw_params->clk_table.entries[i].wck_ratio = 2;
- break;
- case WCK_RATIO_1_4:
- bw_params->clk_table.entries[i].wck_ratio = 4;
- break;
- default:
- bw_params->clk_table.entries[i].wck_ratio = 1;
+ /* Base the clock table on dcfclk; we need at least one entry regardless of the PMFW table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
+ int j;
+
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
+ clock_table->DfPstateTable[j].FClk < min_fclk &&
+ clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
}
- bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
+
+ /* For clocks not read from PMFW, copy defaults from the entry with the closest lower-or-equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[min_pstate].WckRatio);
+ }
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[max_pstate].WckRatio);
+ i++;
}
+ bw_params->clk_table.num_entries = i--;
+
+ /* Make sure all highest clocks are included */
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
- bw_params->num_channels = bios_info->ma_channel_number;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -641,7 +689,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn314_init_clocks,
+ .init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
@@ -681,10 +729,10 @@ void dcn314_clk_mgr_construct(
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
- smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
+ smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
- sizeof(DpmClocks_t),
+ sizeof(DpmClocks314_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
@@ -729,7 +777,7 @@ void dcn314_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
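The rebuilt dcn314_clk_mgr_helper_populate_bw_params() keys each row on a PMFW dcfclk level, picks the lowest valid fclk pstate whose voltage fits, and backfills the clocks it cannot read from the closest lower-or-equal default dcfclk entry. That default-table search, isolated as a helper (sketch, assuming the clk_limit_table_entry layout used above):

/* Sketch: index of the default entry with the closest dcfclk <= target,
 * falling back to index 0, mirroring the backward scan above.
 */
static int closest_le_dcfclk(const struct clk_limit_table_entry *entries,
                             int num_entries, uint32_t target_dcfclk)
{
        int j;

        for (j = num_entries - 1; j > 0; j--)
                if (entries[j].dcfclk_mhz <= target_dcfclk)
                        break;
        return j;
}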
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index c695a4498c50..171f84340eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
-void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index a7958dc96581..047d19ea919c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -36,6 +36,37 @@ typedef enum {
WCK_RATIO_MAX
} WCK_RATIO_e;
+typedef struct {
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
struct dcn314_watermarks {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
};
struct dcn314_smu_dpm_clks {
- DpmClocks_t *dpm_clks;
+ DpmClocks314_t *dpm_clks;
union large_integer mc_address;
};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e42f44fc1c08..aeecca68dea7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1074,8 +1074,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
- bool pipe_split_change =
- context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -3229,7 +3236,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3254,6 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
top_pipe_to_program->stream_res.tg);
}
- }
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -3466,7 +3472,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3499,19 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
- }
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
- /* Since phantom pipe programming is moved to post_unlock_program_front_end,
- * move the SubVP lock to after the phantom pipes have been setup
- */
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- }
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+	 * move the SubVP lock to after the phantom pipes have been set up
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4292,7 +4296,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
@@ -4340,6 +4344,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
struct dc_context *dc_ctx = dc->ctx;
dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e51338441d0..66d2ae7aacf5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3372,7 +3372,7 @@ bool dc_link_setup_psr(struct dc_link *link,
switch(link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
if(!dc->debug.disable_z10)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ffc0f1c0ea93..7dbab15bfa68 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_21;
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dc_version = DCN_VERSION_3_14;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8e1e40083ec8..5908b60db313 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.196"
+#define DC_VER "3.2.198"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -213,6 +213,7 @@ struct dc_caps {
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
+ uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
@@ -352,6 +353,7 @@ struct dc_config {
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
+ bool use_default_clock_table;
};
enum visual_confirm {
@@ -609,6 +611,7 @@ struct dc_bounding_box_overrides {
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int dummy_clock_change_latency_ns;
+ int fclk_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -751,6 +754,7 @@ struct dc_debug_options {
uint32_t mst_start_top_delay;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
+ bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2d61c2a91cee..09b304507bad 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
+#include "../basics/conversion.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -275,8 +276,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
union dmub_rb_cmd cmd = { 0 };
cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- // TODO: Uncomment once FW headers are promoted
- //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
+ cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
@@ -601,6 +601,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ uint32_t out_num, out_den;
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -612,6 +613,16 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
+
+ /* Calculate the scaling factor from the src and dst height.
+	 * e.g. if 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
+	 * Reducing the fraction 1080/2160 to 1/2 gives the "scaling factor".
+ */
+ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, &out_num, &out_den);
+ // TODO: Uncomment below lines once DMCUB include headers are promoted
+ //pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+ //pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
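
reduce_fraction() is the helper newly exported from basics/conversion.h (see the diffstat). A plausible implementation, shown here only as a sketch, is the usual Euclid GCD reduction:

    static unsigned int gcd_u32(unsigned int a, unsigned int b)
    {
            while (b) {
                    unsigned int t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    /* Sketch of a reduce_fraction()-style helper: 2160/1080 reduces to 2/1. */
    static void reduce_fraction_sketch(unsigned int num, unsigned int den,
                                       unsigned int *out_num, unsigned int *out_den)
    {
            unsigned int g = gcd_u32(num, den);

            *out_num = g ? num / g : num;
            *out_den = g ? den / g : den;
    }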
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index a0af0f6afeef..9544abf75e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -344,6 +344,7 @@ enum dc_detect_reason {
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 213de8cabfad..165392380842 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -543,9 +543,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
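
The new "% 10" lines keep the scaled clock on a whole-kHz boundary (the function works in 100 Hz units, so a multiple of 10 units is 1 kHz). A worked example of the 10-bit (COLOR_DEPTH_101010) path, as an illustrative standalone function:

    /* Illustration only: 1,485,000 (148.5 MHz in 100 Hz units) * 5 >> 2
     * = 1,856,250, already kHz-aligned; an odd value such as 1,856,253
     * would round down to 1,856,250 after the modulo step. */
    static uint32_t scale_for_30bpp_and_align(uint32_t pix_clk_100hz)
    {
            uint32_t clk = (pix_clk_100hz * 5) >> 2;  /* x1.25 for 10-bit color */

            return clk - (clk % 10);                  /* clamp to kHz granularity */
    }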
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d4a6504dfe00..db7ca4b0cdb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -361,8 +361,6 @@ void dpp1_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index b54c12400323..564e061ccb58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -278,9 +278,6 @@ void hubp1_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index bed783747f16..5b5d952b2b8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -110,6 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 769974375b4b..8e9384094f6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -131,6 +131,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
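
Note the guard added here (and to the DCN20 copy below) only protects against an entry whose mpcc_bot points back at itself — the corruption pattern actually observed — not against a longer cycle. The same defensive-walk shape in isolation, with a simplified node type:

    struct node { int id; struct node *next; };

    static struct node *find_node(struct node *head, int id)
    {
            while (head) {
                    if (head->id == id)
                            return head;
                    /* A self-referencing entry would spin forever: bail out. */
                    if (head == head->next)
                            break;
                    head = head->next;
            }
            return NULL;
    }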
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e1a9a45b03b6..3fc300cd1ce9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -465,6 +465,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
+		// Last chance to clear underflow; otherwise it will always be there because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index ea1f14af0db7..eaa7032f0f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -166,8 +166,6 @@ static void dpp2_cnv_setup (
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 936af65381ef..9570c2118ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -463,9 +463,6 @@ void hubp2_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 3d307dd58e9a..116f67a0b989 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -531,6 +531,12 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index c5e200d09038..5752271f22df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,9 +67,15 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active;
+ uint32_t riommu_active, prefetch_done;
int i;
+ REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
+
+ if (prefetch_done) {
+ hubbub->riommu_active = true;
+ return;
+ }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
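
The early return makes dcn21_dchvm_init() idempotent: if firmware (or a previous call) already completed the RIOMMU prefetch, the handshake is skipped and the block is just marked active. The shape of the pattern, with placeholder register and type names:

    struct hw_block { volatile unsigned int *status; int active; };
    #define STATUS_PREFETCH_DONE 0x1u   /* placeholder bit definition */

    static void hw_block_init(struct hw_block *blk)
    {
            if (*blk->status & STATUS_PREFETCH_DONE) {
                    blk->active = 1;   /* already brought up, nothing to redo */
                    return;
            }
            /* ...full init handshake would run here... */
            blk->active = 1;
    }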
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 77b00f86c216..4a668d6563df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -244,8 +244,6 @@ void dpp3_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 6a4dcafb9bba..dc3e8df706b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 0a67f8a5656d..d97076648acb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -372,7 +372,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGE) {
+ if (eng_id <= ENGINE_ID_DIGB) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
index 7c77c71591a0..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -162,7 +162,8 @@
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
- SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 468a893ff785..aedff18aff56 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2153,7 +2153,7 @@ static bool dcn31_resource_construct(
pool->base.usb4_dpia_count = 4;
}
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2)
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
pool->base.usb4_dpia_count = 4;
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 41f8ec99da6b..901436591ed4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn31_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
struct dcn31_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index e3b5a95e03b1..702c28c2560e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -13,31 +13,6 @@
DCN314 = dcn314_resource.o dcn314_hwseq.o dcn314_init.o \
dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 755c715ad8dc..39931d48f385 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,7 +343,10 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ bool two_pix_per_container = false;
+ two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (is_dp_128b_132b_signal(pipe_ctx)) {
@@ -355,16 +358,13 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
else
*k2_div = PIXEL_RATE_DIV_BY_4;
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
- } else if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- *k1_div = PIXEL_RATE_DIV_BY_2;
- *k2_div = PIXEL_RATE_DIV_BY_2;
} else {
- if (odm_combine_factor == 1)
- *k2_div = PIXEL_RATE_DIV_BY_4;
- else if (odm_combine_factor == 2)
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+ if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -374,3 +374,31 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
return odm_combine_factor;
}
+
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t pix_per_cycle = 1;
+ uint32_t odm_combine_factor = 1;
+
+ if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
+ || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ pix_per_cycle = 2;
+
+ if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
+ pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ pix_per_cycle);
+}
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+ return false;
+}
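
Condensing the reworked divider selection in the DP (non-128b/132b) branch of dcn314_calculate_dccg_k1_k2_values(): k1 is now always /1; k2 is /2 when the timing packs two pixels per container (e.g. YCbCr 4:2:0), otherwise /4, tightened back to /2 under ODM 2:1 or the debug div policy. As a stand-alone sketch (enum values illustrative, not the real PIXEL_RATE_DIV_* constants):

    enum rate_div { DIV_BY_1 = 1, DIV_BY_2 = 2, DIV_BY_4 = 4 };

    static void pick_dp_dividers(int two_pix_per_container, int odm_2to1,
                                 int div_policy, enum rate_div *k1, enum rate_div *k2)
    {
            *k1 = DIV_BY_1;
            *k2 = two_pix_per_container ? DIV_BY_2 : DIV_BY_4;
            if (!two_pix_per_container && (odm_2to1 || div_policy))
                    *k2 = DIV_BY_2;
    }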
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index be0f5e4d48e1..d014580592ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -39,4 +39,8 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index b9debeb081fd..fcf67eb3478f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -145,6 +145,8 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
+ .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 63861cdfb09f..3a9e3870b3a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -70,6 +70,7 @@
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn314/dcn314_fpu.h"
#include "dcn314/dcn314_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -132,155 +133,6 @@ static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C
#define DC_LOGGER_INIT(logger)
-#define DCN3_14_DEFAULT_DET_SIZE 384
-#define DCN3_14_MAX_DET_SIZE 384
-#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
-#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 668,
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1402,7 +1254,7 @@ static struct stream_encoder *dcn314_stream_encoder_create(
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGF) {
+ if (eng_id < ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
@@ -1447,7 +1299,8 @@ static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
- vpg_inst = hpo_dp_inst + 6;
+ //Uses offset index 5-8, but actually maps to vpg_inst 6-9
+ vpg_inst = hpo_dp_inst + 5;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
@@ -1793,109 +1646,16 @@ static struct clock_source *dcn31_clock_source_create(
return NULL;
}
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
- bool upscaled = false;
-
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-
- if (pipe->plane_state &&
- (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
- pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
- upscaled = true;
-
- /*
- * Immediate flip can be set dynamically after enabling the plane.
- * We need to require support for immediate flip or underflow can be
- * intermittently experienced depending on peak b/w requirements.
- */
- pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
- pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.src.dcc_rate = 3;
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
-
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
-
- pipe_cnt++;
- }
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
-
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
- /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
- && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
- } else if (context->stream_count >= 3 && upscaled) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->stream)
- continue;
+ int pipe_cnt;
- if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
- pipe->stream->apply_seamless_boot_optimization) {
-
- if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
- context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
- break;
- }
- }
- }
+ DC_FP_START();
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ DC_FP_END();
return pipe_cnt;
}
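
This wrapper illustrates the convention the whole refactor enforces: floating-point math lives only in dml/ files built with hard-float flags (see the dml/Makefile hunk later in this series), and callers bracket the single FPU entry point with DC_FP_START()/DC_FP_END(), which map to kernel_fpu_begin()/kernel_fpu_end() on x86. A minimal sketch of a caller, with do_float_math_fpu() standing in for a hypothetical dml-side function:

    int do_float_math_fpu(int in);   /* hypothetical, defined in a dml/*_fpu.c file */

    static int call_fpu_helper(int in)
    {
            int out;

            DC_FP_START();           /* save FPU state, disable preemption */
            out = do_float_math_fpu(in);
            DC_FP_END();             /* restore FPU state */

            return out;
    }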
@@ -1906,88 +1666,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st *clock_tmp = dcn3_14_soc._clock_tmp;
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
-
- if (bw_params->num_channels > 0)
- dcn3_14_soc.num_chans = bw_params->num_channels;
-
- ASSERT(dcn3_14_soc.num_chans);
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_14_soc.num_states - 1;
- }
-
- clock_tmp[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_tmp[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_tmp[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-
- if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
- clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_tmp[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_tmp[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_tmp[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_tmp[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_tmp[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_14_soc.clock_limits[i] = clock_tmp[i];
- if (clk_table->num_entries)
- dcn3_14_soc.num_states = clk_table->num_entries;
- }
-
- if (max_dispclk_mhz) {
- dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+ DC_FP_START();
+ dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -2069,6 +1750,7 @@ static bool dcn314_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index c41108847ce0..0dd3153aa5c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -29,6 +29,9 @@
#include "core_types.h"
+extern struct _vcs_dpi_ip_params_st dcn3_14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc;
+
#define TO_DCN314_RES_POOL(pool)\
container_of(pool, struct dcn314_resource_pool, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index 39929fa67a51..22849eaa6f24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn315_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_15_soc;
struct dcn315_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 0dc5a6c13ae7..aba6d634131b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn316_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_16_soc;
struct dcn316_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index d38341f68b17..ebd3945c71f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -250,6 +250,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
uint32_t num_ways = 0;
+ uint32_t prev_addr_low = 0;
for (i = 0; i < ctx->stream_count; i++) {
stream = ctx->streams[i];
@@ -267,10 +268,20 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
plane = ctx->stream_status[i].plane_states[j];
// Calculate total surface size
- surface_size = plane->plane_size.surface_pitch *
+ if (prev_addr_low != plane->address.grph.addr.u.low_part) {
+			/* If plane addresses differ from the previous FB, userspace allocated separate FBs */
+ surface_size += plane->plane_size.surface_pitch *
plane->plane_size.surface_size.height *
(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ prev_addr_low = plane->address.grph.addr.u.low_part;
+ } else {
+			/* We have the same FB for all the planes.
+			 * Xorg always creates one giant FB that holds all surfaces,
+			 * so allocating it once is sufficient.
+			 */
+ continue;
+ }
// Convert surface size + starting address to number of cache lines required
// (alignment accounted for)
cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
@@ -320,7 +331,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- uint8_t ways;
+ uint8_t ways, i;
+ int j;
+ bool stereo_in_use = false;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -349,7 +363,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
* and configure HUBP's to fetch from MALL
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- if (ways <= dc->caps.cache_num_ways) {
+
+ /* MALL not supported with Stereo3D. If any plane is using stereo,
+ * don't try to enter MALL.
+ */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ plane = dc->current_state->stream_status[i].plane_states[j];
+
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
+ stereo_in_use = true;
+ break;
+ }
+ }
+ if (stereo_in_use)
+ break;
+ }
+ if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -683,9 +713,11 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
+ // MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
- pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
cache_cursor);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index eff1f4e17689..1fad7b48bd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -281,7 +281,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr, // TODO: Update to optc32_set_drr once FW headers are promoted
+ .set_drr = optc32_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 9a26d24b579f..8b887b552f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -867,7 +867,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -2051,6 +2051,7 @@ static bool dcn32_resource_construct(
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index b3f8503cea9c..955f52e6064d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -63,7 +63,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;
+ mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
// For bytes required in MALL, calculate based on number of MBlks required
num_mblks = (mall_region_pixels * bytes_per_pixel +
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 8157e40d2c7e..c8b7d6ff38f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -868,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -1662,8 +1662,9 @@ static bool dcn321_resource_construct(
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
-
+ dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 359f6e9a1da0..86a3b5bfd699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,7 +61,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
@@ -71,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
@@ -82,7 +82,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
@@ -131,6 +130,7 @@ DML += dcn321/dcn321_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
+DML += dcn314/dcn314_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ca44df4fca74..d34e0f1314d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,6 +30,7 @@
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "clk_mgr/dcn21/rn_clk_mgr.h"
#include "dcn20_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 7ef66e511ec8..d211cf6d234c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -26,6 +26,7 @@
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn301/dcn301_resource.h"
+#include "clk_mgr/dcn301/vg_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn301_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index e36cfa5985ea..149a1b17cdf3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -25,6 +25,9 @@
#include "resource.h"
#include "clk_mgr.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn315/dcn315_resource.h"
+#include "dcn316/dcn316_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn31_fpu.h"
@@ -114,7 +117,7 @@ struct _vcs_dpi_ip_params_st dcn3_1_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -259,7 +262,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
@@ -355,7 +358,7 @@ struct _vcs_dpi_ip_params_st dcn3_16_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 3fab19134480..d63b4209b14c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -26,7 +26,7 @@
#include "dc.h"
#include "dc_link.h"
#include "../display_mode_lib.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
#include "../dml_inline_defs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 66b82e4f05c6..35d10b4d018b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -27,7 +27,7 @@
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_31.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
new file mode 100644
index 000000000000..34a5d0f87b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr.h"
+#include "resource.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn314_fpu.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/display_mode_vba.h"
+
+struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dcn3_14_soc.clock_limits;
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
+
+ dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+
+ if (bw_params->num_channels > 0)
+ dcn3_14_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(dcn3_14_soc.num_chans);
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+			/* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+			/* SMU gives one DPM level, let's take the highest one */
+ closest_clk_lvl = dcn3_14_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+				/* SMU fix not released yet */
+ clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_14_soc.clock_limits[i] = clock_limits[i];
+		if (clk_table->num_entries)
+			dcn3_14_soc.num_states = clk_table->num_entries;
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+}
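
The heart of the loop above is the backwards search that pins each SMU clock-table entry to the highest bounding-box state whose DCFCLK does not exceed it, with a single-entry table pinned to the top state. A distilled, standalone restatement of that mapping (hypothetical helper name, same logic):

/* Sketch of the closest-level search used above (illustrative only).
 * Returns the highest bounding-box state whose dcfclk does not exceed
 * the SMU entry; a single-entry clock table takes the top state, and
 * if no state qualifies, state 0 is used.
 */
static unsigned int find_closest_clk_lvl(const unsigned int *bb_dcfclk_mhz,
					 int num_states,
					 unsigned int smu_dcfclk_mhz,
					 int num_table_entries)
{
	int j;

	if (num_table_entries == 1)
		return num_states - 1;	/* one DPM level: take the highest */

	for (j = num_states - 1; j >= 0; j--)	/* loop backwards */
		if (bb_dcfclk_mhz[j] <= smu_dcfclk_mhz)
			return j;

	return 0;	/* nothing at or below: fall back to state 0 */
}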
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+
+ dc_assert_fp_enabled();
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+ && pipe->stream->adjust.v_total_min > timing->v_total)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip or underflow can be
+ * intermittently experienced depending on peak b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
+
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
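
The tail of the function is effectively a small DET allocation policy: a lone small pipe either enables 4:1 MPC (dual-plane video up to 1920x1080) or gets a 192 KB DET with unbounded requests (single-plane up to 5120 wide); otherwise the debug CRB policy or a three-stream upscaled configuration overrides the default. The same branches condensed into a sketch (hypothetical helper; the DET_SIZE_DEFAULT guard is simplified to > 0):

/* Illustrative restatement of the DET sizing branches above; the return
 * value mirrors context->bw_ctx.dml.ip.det_buffer_size_kbytes.
 */
#include <stdbool.h>

static int dcn314_det_policy_sketch(bool single_pipe, bool dual_plane,
				    int src_w, int src_h,
				    int stream_count, bool upscaled,
				    int crb_policy, int crb_min_disp_count)
{
	if (single_pipe && dual_plane && src_w <= 1920 && src_h <= 1080)
		return 384;		/* default DET; 4to1 MPC enabled */
	if (single_pipe && !dual_plane && src_w <= 5120)
		return 192;		/* plus unbounded request mode */
	if (stream_count >= crb_min_disp_count && crb_policy > 0)
		return crb_policy * 64;	/* debug CRB allocation policy */
	if (stream_count >= 3 && upscaled)
		return 192;
	return 384;			/* DCN3_14_DEFAULT_DET_SIZE */
}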
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
new file mode 100644
index 000000000000..d32c5bb99f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN314_FPU_H__
+#define __DCN314_FPU_H__
+
+#define DCN3_14_DEFAULT_DET_SIZE 384
+#define DCN3_14_MAX_DET_SIZE 384
+#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+#endif
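
Both exported entry points call dc_assert_fp_enabled(), so callers are expected to bracket them with the DC_FP_START()/DC_FP_END() kernel-FPU guards, in line with the rest of the DML FPU split. A minimal caller sketch (the dcn314_resource.c context is assumed, not part of this hunk):

/* Expected call pattern from the resource code (sketch only).
 * DC_FP_START/DC_FP_END enable kernel FPU use around the
 * float-heavy DML paths that dc_assert_fp_enabled() checks for.
 */
DC_FP_START();
dcn314_update_bw_bounding_box_fpu(dc, bw_params);
DC_FP_END();

DC_FP_START();
pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes,
						      fast_validate);
DC_FP_END();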
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 66453546e24f..8118cfc5b405 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -473,8 +473,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
// DML calculation for MALL region doesn't take into account FW delay
// and required pstate allow width for multi-display cases
+	/* Add a 16-line margin to the MALL REGION because SUB_VP_START_LINE must be
+	 * aligned to 2 swaths (i.e. 16 lines)
+ */
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
- pstate_width_fw_delay_lines;
+ pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -490,6 +493,7 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
phantom_stream->timing.v_front_porch +
phantom_stream->timing.v_sync_width +
phantom_bp;
+ phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}
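
With the new term, phantom_vactive is the MALL subviewport height plus the firmware p-state delay plus dc->caps.subvp_swath_height_margin_lines, a 16-line margin (two 8-line swaths) that keeps SUB_VP_START_LINE swath-aligned. In numbers, with assumed inputs:

/* Worked example (input values assumed for illustration):
 *   subviewport lines needed in MALL: 120
 *   pstate_width_fw_delay_lines:       20
 *   subvp_swath_height_margin_lines:   16  (2 swaths * 8 lines)
 *   => phantom_vactive = 120 + 20 + 16 = 156 lines,
 * so SUB_VP_START_LINE can be rounded to a 16-line boundary without
 * falling outside the phantom active region.
 */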
/**
@@ -983,9 +987,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
+ * Override present for testing.
*/
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ if (dc->debug.dml_disallow_alternate_prefetch_modes)
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter;
+ else
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
if (*vlevel < context->bw_ctx.dml.soc.num_states)
@@ -1014,7 +1024,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 * will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
* enough to support MCLK switching.
*/
- if (*vlevel == context->bw_ctx.dml.soc.num_states) {
+ if (*vlevel == context->bw_ctx.dml.soc.num_states &&
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
+ dm_prefetch_support_uclk_fclk_and_stutter) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_stutter;
/* There are params (such as FabricClock) that need to be recalculated
@@ -1344,7 +1356,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
@@ -1373,17 +1386,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
DC_FP_END();
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
+ *
+ * If mode is unsupported or there is no p-state support, then
+	 * fall back to favoring voltage.
*
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, try if
- * supported with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+	 * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+	 * Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_fclk_and_stutter;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
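
Taken together with the earlier hunk, validation now walks a ladder of prefetch policies rather than applying one hard cap, and the new debug flag restores the old single-shot behavior. The resulting order of attempts, sketched:

/* Prefetch-policy attempts after this change (sketch):
 *
 *  full validation, override unset:
 *      dm_prefetch_support_uclk_fclk_and_stutter_if_possible
 *      (DML may relax internally; no manual retry needed)
 *
 *  full validation, dml_disallow_alternate_prefetch_modes set:
 *      dm_prefetch_support_uclk_fclk_and_stutter
 *        -> DRR with no p-state support: dm_prefetch_support_stutter
 *        -> mode unsupported: dm_prefetch_support_fclk_and_stutter
 *
 *  fast_validate always takes the fclk_and_stutter fallback path.
 */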
@@ -2098,6 +2116,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_2_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
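
The new fclk override follows the established bb_overrides pattern: compare the bounding-box value (microseconds, scaled to ns) against the debugfs override (nanoseconds), and adopt the override when it is non-zero. The pattern distilled into a sketch (hypothetical helper, not in the patch):

/* Generic form of the bb_overrides latency plumbing (illustrative).
 * soc_us is the bounding-box field in microseconds; override_ns is
 * the user-supplied value in nanoseconds (0 means "not set").
 */
static void apply_latency_override(double *soc_us, int override_ns)
{
	if ((int)(*soc_us * 1000) != override_ns && override_ns)
		*soc_us = override_ns / 1000.0;
}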
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 890612db08dc..cb2025771646 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -221,7 +221,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
// VBA_DELTA
// Calculate DET size, swath height
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -461,7 +460,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
v->SurfaceSizeInMALL,
@@ -757,9 +755,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
- v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
- v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+ v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelaySCL,
@@ -1167,7 +1163,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
@@ -1952,7 +1947,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2549,7 +2543,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2749,7 +2742,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
mode_lib->vba.SurfaceSizeInMALL,
@@ -3266,7 +3258,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
mode_lib->vba.DSCDelayPerState[i][k],
@@ -3566,7 +3557,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[i][j],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 07f8f3b8626b..05fc14a47fba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -391,7 +391,6 @@ void dml32_CalculateBytePerPixelAndBlockSizes(
} // CalculateBytePerPixelAndBlockSizes
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -456,10 +455,18 @@ void dml32_CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
+ unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpSwathSizeBytesY;
+ unsigned int RoundedUpSwathSizeBytesC;
+ double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
unsigned int k;
-
- st_vars->TotalActiveDPP = 0;
- st_vars->NoChromaSurfaces = true;
+ unsigned int TotalActiveDPP = 0;
+ bool NoChromaSurfaces = true;
+ unsigned int DETBufferSizeInKByteForSwathCalculation;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -494,43 +501,43 @@ void dml32_CalculateSwathAndDETConfiguration(
DPPPerSurface,
/* Output */
- st_vars->SwathWidthdoubleDPP,
- st_vars->SwathWidthdoubleDPPChroma,
+ SwathWidthdoubleDPP,
+ SwathWidthdoubleDPPChroma,
SwathWidth,
SwathWidthChroma,
- st_vars->MaximumSwathHeightY,
- st_vars->MaximumSwathHeightC,
+ MaximumSwathHeightY,
+ MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * st_vars->MaximumSwathHeightY[k];
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * st_vars->MaximumSwathHeightC[k];
+ RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
+ RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, st_vars->MaximumSwathHeightY[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, st_vars->MaximumSwathHeightC[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesC[k]);
+ RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (SourcePixelFormat[k] == dm_420_10) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesY[k], 256);
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesC[k], 256);
+ RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
+ RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalActiveDPP = st_vars->TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
+ TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
- st_vars->NoChromaSurfaces = false;
+ NoChromaSurfaces = false;
}
}
@@ -540,10 +547,10 @@ void dml32_CalculateSwathAndDETConfiguration(
// if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
// - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
// - assume for "narrow" vp case in which the ROB can fit 8 swaths, the DET should be big enough to do full size req
- *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (st_vars->RoundedUpMaxSwathSizeBytesY[0]/512);
+ *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
if (*CompBufReservedSpaceNeedAdjustment == 1) {
- *CompBufReservedSpaceKBytes = ROBSizeKBytes - st_vars->RoundedUpMaxSwathSizeBytesY[0]/512;
+ *CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
}
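
The divisor of 512 folds the comment's whole formula into one step: 8 swaths at a worst-case 4:1 compression is 2 swath-sizes of bytes, and converting bytes to KB divides by 1024, so the adjustment is swath_bytes * 2 / 1024 = swath_bytes / 512. Worked through:

/* Why RoundedUpMaxSwathSizeBytesY[0] / 512:
 *   reserved_kb = 8 swaths / 4 (max compression) * swath_bytes / 1024
 *               = swath_bytes * 2 / 1024
 *               = swath_bytes / 512
 * e.g. swath_bytes = 65536 -> a 128 KB reserved-space adjustment.
 */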
#ifdef __DML_VBA_DEBUG__
@@ -551,7 +558,7 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, *CompBufReservedSpaceNeedAdjustment);
#endif
- *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, st_vars->TotalActiveDPP, st_vars->NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
+ *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml32_CalculateDETBufferSize(DETSizeOverride,
UseMALLForPStateChange,
@@ -566,8 +573,8 @@ void dml32_CalculateSwathAndDETConfiguration(
SourcePixelFormat,
ReadBandwidthLuma,
ReadBandwidthChroma,
- st_vars->RoundedUpMaxSwathSizeBytesY,
- st_vars->RoundedUpMaxSwathSizeBytesC,
+ RoundedUpMaxSwathSizeBytesY,
+ RoundedUpMaxSwathSizeBytesC,
DPPPerSurface,
/* Output */
@@ -575,7 +582,7 @@ void dml32_CalculateSwathAndDETConfiguration(
CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, st_vars->TotalActiveDPP);
+ dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
@@ -586,42 +593,42 @@ void dml32_CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
+ DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
- st_vars->DETBufferSizeInKByteForSwathCalculation);
+ DETBufferSizeInKByteForSwathCalculation);
#endif
- if (st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] < 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
} else {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
}
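
The if/else chain tries full-height luma and chroma swaths first, halves whichever plane dominates (luma when it is at least 1.5x the chroma swath, chroma otherwise), and finally halves both; each candidate must fit in half the DET so two swaths can be in flight. A worked example with assumed sizes:

/* Worked example (sizes assumed for illustration):
 *   DETBufferSizeInKByteForSwathCalculation = 384 -> budget = 384*1024/2
 *   RoundedUpMaxSwathSizeBytesY[k] = 147456, ...C[k] = 73728
 *   full/full:  147456 + 73728 = 221184 > 196608 -> does not fit
 *   Y >= 1.5*C and Y/2 + C = 147456 <= 196608 -> halve luma only:
 *   SwathHeightY = MaximumSwathHeightY / 2, SwathHeightC unchanged.
 */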
- if ((st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 >
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
+ if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
|| SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
@@ -636,7 +643,7 @@ void dml32_CalculateSwathAndDETConfiguration(
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
DETBufferSizeC[k] = 0;
- } else if (st_vars->RoundedUpSwathSizeBytesY <= 1.5 * st_vars->RoundedUpSwathSizeBytesC) {
+ } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
#endif
@@ -654,11 +661,11 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ k, RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesC[k]);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesY);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesC);
+ k, RoundedUpMaxSwathSizeBytesC[k]);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
@@ -1867,7 +1874,6 @@ void dml32_CalculateSurfaceSizeInMall(
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -1933,6 +1939,21 @@ void dml32_CalculateVMRowAndSwath(
unsigned int BIGK_FRAGMENT_SIZE[])
{
unsigned int k;
+ unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
+ unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
+ bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (HostVMEnable == true) {
@@ -1954,15 +1975,15 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
!IsVertical(myPipe[k].SourceRotation)) {
- st_vars->PTEBufferSizeInRequestsForLuma[k] =
+ PTEBufferSizeInRequestsForLuma[k] =
(PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = st_vars->PTEBufferSizeInRequestsForLuma[k];
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
}
- st_vars->PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -1982,21 +2003,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForChroma[k],
+ PTEBufferSizeInRequestsForChroma[k],
myPipe[k].PitchC,
myPipe[k].DCCMetaPitchC,
myPipe[k].BlockWidthC,
myPipe[k].BlockHeightC,
/* Output */
- &st_vars->MetaRowByteC[k],
- &st_vars->PixelPTEBytesPerRowC[k],
+ &MetaRowByteC[k],
+ &PixelPTEBytesPerRowC[k],
&dpte_row_width_chroma_ub[k],
&dpte_row_height_chroma[k],
&dpte_row_height_linear_chroma[k],
- &st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k],
- &st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_chroma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowC_one_row_per_frame[k],
+ &dpte_row_width_chroma_ub_one_row_per_frame[k],
+ &dpte_row_height_chroma_one_row_per_frame[k],
&meta_req_width_chroma[k],
&meta_req_height_chroma[k],
&meta_row_width_chroma[k],
@@ -2024,19 +2045,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillC[k],
&MaxNumSwathC[k]);
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = 0;
- st_vars->PixelPTEBytesPerRowC[k] = 0;
- st_vars->PDEAndMetaPTEBytesFrameC = 0;
- st_vars->MetaRowByteC[k] = 0;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForChroma[k] = 0;
+ PixelPTEBytesPerRowC[k] = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC[k] = 0;
MaxNumSwathC[k] = 0;
PrefetchSourceLinesC[k] = 0;
- st_vars->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
+ dpte_row_height_chroma_one_row_per_frame[k] = 0;
+ dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
+ PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
}
- st_vars->PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -2056,21 +2077,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForLuma[k],
+ PTEBufferSizeInRequestsForLuma[k],
myPipe[k].PitchY,
myPipe[k].DCCMetaPitchY,
myPipe[k].BlockWidthY,
myPipe[k].BlockHeightY,
/* Output */
- &st_vars->MetaRowByteY[k],
- &st_vars->PixelPTEBytesPerRowY[k],
+ &MetaRowByteY[k],
+ &PixelPTEBytesPerRowY[k],
&dpte_row_width_luma_ub[k],
&dpte_row_height_luma[k],
&dpte_row_height_linear_luma[k],
- &st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k],
- &st_vars->dpte_row_width_luma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_luma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowY_one_row_per_frame[k],
+ &dpte_row_width_luma_ub_one_row_per_frame[k],
+ &dpte_row_height_luma_one_row_per_frame[k],
&meta_req_width[k],
&meta_req_height[k],
&meta_row_width[k],
@@ -2098,19 +2119,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillY[k],
&MaxNumSwathY[k]);
- PDEAndMetaPTEBytesFrame[k] = st_vars->PDEAndMetaPTEBytesFrameY + st_vars->PDEAndMetaPTEBytesFrameC;
- MetaRowByte[k] = st_vars->MetaRowByteY[k] + st_vars->MetaRowByteC[k];
+ PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
+ MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
- if (st_vars->PixelPTEBytesPerRowY[k] <= 64 * st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC[k] <= 64 * st_vars->PTEBufferSizeInRequestsForChroma[k]) {
+ if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
PTEBufferSizeNotExceeded[k] = true;
} else {
PTEBufferSizeNotExceeded[k] = false;
}
- st_vars->one_row_per_frame_fits_in_buffer[k] = (st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
- st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * st_vars->PTEBufferSizeInRequestsForChroma[k]);
+ one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
+ PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
}
dml32_CalculateMALLUseForStaticScreen(
@@ -2118,7 +2139,7 @@ void dml32_CalculateVMRowAndSwath(
MALLAllocatedForDCN,
UseMALLForStaticScreen, // mode
SurfaceSizeInMALL,
- st_vars->one_row_per_frame_fits_in_buffer,
+ one_row_per_frame_fits_in_buffer,
/* Output */
 UsesMALLForStaticScreen); // boolean
@@ -2144,13 +2165,13 @@ void dml32_CalculateVMRowAndSwath(
!(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
if (use_one_row_for_frame[k]) {
- dpte_row_height_luma[k] = st_vars->dpte_row_height_luma_one_row_per_frame[k];
- dpte_row_width_luma_ub[k] = st_vars->dpte_row_width_luma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowY[k] = st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k];
- dpte_row_height_chroma[k] = st_vars->dpte_row_height_chroma_one_row_per_frame[k];
- dpte_row_width_chroma_ub[k] = st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowC[k] = st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k];
- PTEBufferSizeNotExceeded[k] = st_vars->one_row_per_frame_fits_in_buffer[k];
+ dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
+ dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
+ dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
+ dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
+ PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
}
if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
@@ -2158,7 +2179,7 @@ void dml32_CalculateVMRowAndSwath(
else
DCCMetaBufferSizeNotExceeded[k] = false;
- PixelPTEBytesPerRow[k] = st_vars->PixelPTEBytesPerRowY[k] + st_vars->PixelPTEBytesPerRowC[k];
+ PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
if (use_one_row_for_frame[k])
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
@@ -2169,11 +2190,11 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].VRatioChroma,
myPipe[k].DCCEnable,
myPipe[k].HTotal / myPipe[k].PixelClock,
- st_vars->MetaRowByteY[k], st_vars->MetaRowByteC[k],
+ MetaRowByteY[k], MetaRowByteC[k],
meta_row_height[k],
meta_row_height_chroma[k],
- st_vars->PixelPTEBytesPerRowY[k],
- st_vars->PixelPTEBytesPerRowC[k],
+ PixelPTEBytesPerRowY[k],
+ PixelPTEBytesPerRowC[k],
dpte_row_height_luma[k],
dpte_row_height_chroma[k],
@@ -2189,12 +2210,12 @@ void dml32_CalculateVMRowAndSwath(
dml_print("DML::%s: k=%d, dpte_row_height_luma = %d\n", __func__, k, dpte_row_height_luma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowY[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, PixelPTEBytesPerRowY[k]);
dml_print("DML::%s: k=%d, dpte_row_height_chroma = %d\n",
__func__, k, dpte_row_height_chroma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowC[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, PixelPTEBytesPerRowC[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRow = %d\n", __func__, k, PixelPTEBytesPerRow[k]);
dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded = %d\n",
__func__, k, PTEBufferSizeNotExceeded[k]);
@@ -3342,7 +3363,6 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -3406,18 +3426,45 @@ bool dml32_CalculatePrefetchSchedule(
double *VReadyOffsetPix)
{
bool MyError = false;
-
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
- st_vars->max_vratio_pre = __DML_MAX_VRATIO_PRE__;
- st_vars->Tsw_est1 = 0;
- st_vars->Tsw_est3 = 0;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler;
+ double LineTime;
+ double dst_y_prefetch_equ;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tvm_oto_lines;
+ double Tr0_oto_lines;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double trip_to_mem;
+ double Tvm_trips;
+ double Tr0_trips;
+ double Tvm_trips_rounded;
+ double Tr0_trips_rounded;
+ double Lsw_oto;
+ double Tpre_rounded;
+ double prefetch_bw_equ;
+ double Tvm_equ;
+ double Tr0_equ;
+ double Tdmbf;
+ double Tdmec;
+ double Tdmsks;
+ double prefetch_sw_bytes;
+ double bytes_pp;
+ double dep_bytes;
+ unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ double min_Lsw;
+ double Tsw_est1 = 0;
+ double Tsw_est3 = 0;
if (GPUVMEnable == true && HostVMEnable == true)
- st_vars->HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
else
- st_vars->HostVMDynamicLevelsTrips = 0;
+ HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
@@ -3440,19 +3487,19 @@ bool dml32_CalculatePrefetchSchedule(
TSetup,
/* output */
- &st_vars->Tdmbf,
- &st_vars->Tdmec,
- &st_vars->Tdmsks,
+ &Tdmbf,
+ &Tdmec,
+ &Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
- st_vars->LineTime = myPipe->HTotal / myPipe->PixelClock;
- st_vars->trip_to_mem = UrgentLatency;
- st_vars->Tvm_trips = UrgentExtraLatency + st_vars->trip_to_mem * (GPUVMPageTableLevels * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ LineTime = myPipe->HTotal / myPipe->PixelClock;
+ trip_to_mem = UrgentLatency;
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (DynamicMetadataVMEnabled == true)
- *Tdmdl = TWait + st_vars->Tvm_trips + st_vars->trip_to_mem;
+ *Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
@@ -3462,15 +3509,15 @@ bool dml32_CalculatePrefetchSchedule(
#endif
if (DynamicMetadataEnable == true) {
- if (VStartup * st_vars->LineTime < *TSetup + *Tdmdl + st_vars->Tdmbf + st_vars->Tdmec + st_vars->Tdmsks) {
+ if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
- __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
- __func__, st_vars->Tdmsks);
+ __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
__func__, *Tdmdl);
#endif
@@ -3482,21 +3529,21 @@ bool dml32_CalculatePrefetchSchedule(
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + st_vars->Tvm_trips : 0);
+ GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
- st_vars->DPPCycles = st_vars->DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
- st_vars->DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
- *DSTXAfterScaler = st_vars->DPPCycles * myPipe->PixelClock / myPipe->Dppclk + st_vars->DISPCLKCycles *
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
@@ -3506,10 +3553,10 @@ bool dml32_CalculatePrefetchSchedule(
+ ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DPPCycles: %d\n", __func__, st_vars->DPPCycles);
+ dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
- dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, st_vars->DISPCLKCycles);
+ dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->Dispclk);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: ODMMode: %d\n", __func__, myPipe->ODMMode);
@@ -3522,9 +3569,9 @@ bool dml32_CalculatePrefetchSchedule(
else
*DSTYAfterScaler = 0;
- st_vars->DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(st_vars->DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = st_vars->DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
@@ -3532,132 +3579,132 @@ bool dml32_CalculatePrefetchSchedule(
MyError = false;
- st_vars->Tr0_trips = st_vars->trip_to_mem * (st_vars->HostVMDynamicLevelsTrips + 1);
+ Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
if (GPUVMEnable == true) {
- st_vars->Tvm_trips_rounded = dml_ceil(4.0 * st_vars->Tvm_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + st_vars->trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ *Tno_bw = UrgentExtraLatency + trip_to_mem *
+ (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
} else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / st_vars->LineTime, 1.0) /
- 4.0 * st_vars->LineTime; // VBA_ERROR
+ Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
+ 4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
} else {
*Tno_bw = 0;
}
} else if (myPipe->DCCEnable == true) {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
*Tno_bw = 0;
} else {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = st_vars->LineTime / 2.0;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = LineTime / 2.0;
*Tno_bw = 0;
}
- st_vars->Tvm_trips_rounded = dml_max(st_vars->Tvm_trips_rounded, st_vars->LineTime / 4.0);
- st_vars->Tr0_trips_rounded = dml_max(st_vars->Tr0_trips_rounded, st_vars->LineTime / 4.0);
+ Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
+ Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
|| myPipe->SourcePixelFormat == dm_420_12) {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
} else {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
}
- st_vars->prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- st_vars->prefetch_bw_oto = dml_max(st_vars->bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
- st_vars->prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * st_vars->LineTime));
+ prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
+ prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
- st_vars->min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / st_vars->max_vratio_pre;
- st_vars->min_Lsw = dml_max(st_vars->min_Lsw, 1.0);
- st_vars->Lsw_oto = dml_ceil(4.0 * dml_max(st_vars->prefetch_sw_bytes / st_vars->prefetch_bw_oto / st_vars->LineTime, st_vars->min_Lsw), 1.0) / 4.0;
+ min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+ min_Lsw = dml_max(min_Lsw, 1.0);
+ Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
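
prefetch_bw_oto is the larger of the raw pixel-rate demand and the rate needed to move all prefetch bytes within the available source lines; Lsw_oto then rounds the implied line count up to quarter-line granularity, floored at min_Lsw. A numeric sketch of the dml_ceil(4*x)/4 quantization that recurs throughout this function:

/* Quarter-line quantization (sketch, assumed numbers):
 *   lines = prefetch_sw_bytes / prefetch_bw_oto / LineTime = 3.61
 *   min_Lsw = 2.0
 *   Lsw_oto = dml_ceil(4 * max(3.61, 2.0), 1) / 4
 *           = dml_ceil(14.44, 1) / 4 = 15 / 4 = 3.75 lines
 */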
if (GPUVMEnable == true) {
- st_vars->Tvm_oto = dml_max3(
- st_vars->Tvm_trips,
- *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / st_vars->prefetch_bw_oto,
- st_vars->LineTime / 4.0);
+ Tvm_oto = dml_max3(
+ Tvm_trips,
+ *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ LineTime / 4.0);
} else
- st_vars->Tvm_oto = st_vars->LineTime / 4.0;
+ Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_oto = dml_max4(
- st_vars->Tr0_trips,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto,
- (st_vars->LineTime - st_vars->Tvm_oto)/2.0,
- st_vars->LineTime / 4.0);
+ Tr0_oto = dml_max4(
+ Tr0_trips,
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ (LineTime - Tvm_oto)/2.0,
+ LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, st_vars->LineTime - st_vars->Tvm_oto);
- dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, st_vars->LineTime / 4);
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
+ dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
#endif
} else
- st_vars->Tr0_oto = (st_vars->LineTime - st_vars->Tvm_oto) / 2.0;
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
- st_vars->Tvm_oto_lines = dml_ceil(4.0 * st_vars->Tvm_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->Tr0_oto_lines = dml_ceil(4.0 * st_vars->Tr0_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->dst_y_prefetch_oto = st_vars->Tvm_oto_lines + 2 * st_vars->Tr0_oto_lines + st_vars->Lsw_oto;
+ Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
- st_vars->dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / st_vars->LineTime -
+ dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
- dml_print("DML::%s: min_Lsw = %f\n", __func__, st_vars->min_Lsw);
+ dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
- dml_print("DML::%s: trip_to_mem = %f\n", __func__, st_vars->trip_to_mem);
+ dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
- dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, st_vars->prefetch_sw_bytes);
- dml_print("DML::%s: bytes_pp = %f\n", __func__, st_vars->bytes_pp);
+ dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
+ dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml_print("DML::%s: Tvm_trips = %f\n", __func__, st_vars->Tvm_trips);
- dml_print("DML::%s: Tr0_trips = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto = %f\n", __func__, st_vars->Tr0_oto);
- dml_print("DML::%s: Tvm_oto = %f\n", __func__, st_vars->Tvm_oto);
- dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, st_vars->Tvm_oto_lines);
- dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, st_vars->Tr0_oto_lines);
- dml_print("DML::%s: Lsw_oto = %f\n", __func__, st_vars->Lsw_oto);
- dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, st_vars->dst_y_prefetch_oto);
- dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, st_vars->dst_y_prefetch_equ);
+ dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
+ dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
+ dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
+ dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
+ dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
+ dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
+ dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
+ dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
#endif
- st_vars->dst_y_prefetch_equ = dml_floor(4.0 * (st_vars->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- st_vars->Tpre_rounded = st_vars->dst_y_prefetch_equ * st_vars->LineTime;
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+ Tpre_rounded = dst_y_prefetch_equ * LineTime;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, st_vars->dst_y_prefetch_equ);
- dml_print("DML::%s: LineTime: %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
+ dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
- __func__, VStartup * st_vars->LineTime);
+ __func__, VStartup * LineTime);
dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
- dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
__func__, *DSTYAfterScaler);
#endif
- st_vars->dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
+ dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
- if (st_vars->prefetch_sw_bytes < st_vars->dep_bytes)
- st_vars->prefetch_sw_bytes = 2 * st_vars->dep_bytes;
+ if (prefetch_sw_bytes < dep_bytes)
+ prefetch_sw_bytes = 2 * dep_bytes;
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
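[annotation] The schedule times in this function are repeatedly snapped to quarter-line granularity: dml_ceil(4.0 * T / LineTime, 1) / 4.0 rounds a time up to the next quarter of a line, and dml_floor(4.0 * (x + 0.125), 1) / 4.0 rounds a line count down after a half-quarter bias. A minimal standalone sketch of the same arithmetic, assuming plain libm ceil() in place of the DML helper (which also takes a granularity argument):

#include <math.h>
#include <stdio.h>

/* Round a time up to whole quarters of a line, returning lines, mirroring
 * Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0. */
static double to_quarter_lines(double t_us, double line_time_us)
{
        return ceil(4.0 * t_us / line_time_us) / 4.0;
}

int main(void)
{
        /* Hypothetical numbers: a 5.3 us time against an 8 us line. */
        printf("%.2f lines\n", to_quarter_lines(5.3, 8.0)); /* prints 0.75 */
        return 0;
}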
@@ -3665,61 +3712,61 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (st_vars->dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
- if (st_vars->Tpre_rounded - *Tno_bw > 0) {
+ if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - *Tno_bw);
- st_vars->Tsw_est1 = st_vars->prefetch_sw_bytes / PrefetchBandwidth1;
+ + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
+ Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (st_vars->Tsw_est1 / st_vars->LineTime < st_vars->min_Lsw)
- && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw > 0) {
+ if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
+ && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
- if (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded > 0)
- PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + st_vars->prefetch_sw_bytes) /
- (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
+ (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded > 0) {
+ if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded);
- st_vars->Tsw_est3 = st_vars->prefetch_sw_bytes / PrefetchBandwidth3;
+ + prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
+ Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup &&
- (st_vars->Tsw_est3 / st_vars->LineTime < st_vars->min_Lsw) && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 *
- st_vars->LineTime - st_vars->Tvm_trips_rounded > 0) {
+ (Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
+ LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - st_vars->Tvm_trips_rounded);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded > 0) {
- PrefetchBandwidth4 = st_vars->prefetch_sw_bytes /
- (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
+ PrefetchBandwidth4 = prefetch_sw_bytes /
+ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
} else {
PrefetchBandwidth4 = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: Tpre_rounded: %f\n", __func__, st_vars->Tpre_rounded);
+ dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
- dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, st_vars->Tvm_trips_rounded);
- dml_print("DML::%s: Tsw_est1: %f\n", __func__, st_vars->Tsw_est1);
- dml_print("DML::%s: Tsw_est3: %f\n", __func__, st_vars->Tsw_est3);
+ dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
+ dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
+ dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
@@ -3732,9 +3779,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth1 >= st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
@@ -3745,9 +3792,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth2 < st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
@@ -3758,9 +3805,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
- st_vars->Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
+ Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
- st_vars->Tr0_trips_rounded) {
+ Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
@@ -3770,80 +3817,80 @@ bool dml32_CalculatePrefetchSchedule(
}
if (Case1OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth1;
+ prefetch_bw_equ = PrefetchBandwidth1;
else if (Case2OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth2;
+ prefetch_bw_equ = PrefetchBandwidth2;
else if (Case3OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth3;
+ prefetch_bw_equ = PrefetchBandwidth3;
else
- st_vars->prefetch_bw_equ = PrefetchBandwidth4;
+ prefetch_bw_equ = PrefetchBandwidth4;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
- dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, st_vars->prefetch_bw_equ);
+ dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
- if (st_vars->prefetch_bw_equ > 0) {
+ if (prefetch_bw_equ > 0) {
if (GPUVMEnable == true) {
- st_vars->Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
- HostVMInefficiencyFactor / st_vars->prefetch_bw_equ,
- st_vars->Tvm_trips, st_vars->LineTime / 4);
+ Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor / prefetch_bw_equ,
+ Tvm_trips, LineTime / 4);
} else {
- st_vars->Tvm_equ = st_vars->LineTime / 4;
+ Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
- HostVMInefficiencyFactor) / st_vars->prefetch_bw_equ, st_vars->Tr0_trips,
- (st_vars->LineTime - st_vars->Tvm_equ) / 2, st_vars->LineTime / 4);
+ Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
+ HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
+ (LineTime - Tvm_equ) / 2, LineTime / 4);
} else {
- st_vars->Tr0_equ = (st_vars->LineTime - st_vars->Tvm_equ) / 2;
+ Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
- st_vars->Tvm_equ = 0;
- st_vars->Tr0_equ = 0;
+ Tvm_equ = 0;
+ Tr0_equ = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
#endif
}
}
- if (st_vars->dst_y_prefetch_oto < st_vars->dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_oto;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_oto;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_oto;
- *PrefetchBandwidth = st_vars->prefetch_bw_oto;
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ TimeForFetchingMetaPTE = Tvm_oto;
+ TimeForFetchingRowInVBlank = Tr0_oto;
+ *PrefetchBandwidth = prefetch_bw_oto;
} else {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_equ;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_equ;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_equ;
- *PrefetchBandwidth = st_vars->prefetch_bw_equ;
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
}
- *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * st_vars->TimeForFetchingMetaPTE / st_vars->LineTime, 1.0) / 4.0;
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank =
- dml_ceil(4.0 * st_vars->TimeForFetchingRowInVBlank / st_vars->LineTime, 1.0) / 4.0;
+ dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- st_vars->LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, st_vars->TimeForFetchingRowInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
- dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, st_vars->LinesToRequestPrefetchPixelData);
+ dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
- if (st_vars->LinesToRequestPrefetchPixelData >= 1 && st_vars->prefetch_bw_equ > 0) {
- *VRatioPrefetchY = (double) PrefetchSourceLinesY / st_vars->LinesToRequestPrefetchPixelData;
+ if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
@@ -3851,12 +3898,12 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max((double) PrefetchSourceLinesY /
- st_vars->LinesToRequestPrefetchPixelData,
+ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
@@ -3870,7 +3917,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
}
- *VRatioPrefetchC = (double) PrefetchSourceLinesC / st_vars->LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
@@ -3879,11 +3926,11 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
@@ -3898,25 +3945,25 @@ bool dml32_CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
- / st_vars->LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
- / st_vars->LineTime;
+ / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
+ / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
__func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
- st_vars->LinesToRequestPrefetchPixelData
+ LinesToRequestPrefetchPixelData
* myPipe->BytePerPixelC
- * swath_width_chroma_ub / st_vars->LineTime;
+ * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
- __func__, st_vars->LinesToRequestPrefetchPixelData);
+ __func__, LinesToRequestPrefetchPixelData);
#endif
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
@@ -3925,15 +3972,15 @@ bool dml32_CalculatePrefetchSchedule(
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
- (double)st_vars->LinesToRequestPrefetchPixelData * st_vars->LineTime +
- 2.0*st_vars->TimeForFetchingRowInVBlank + st_vars->TimeForFetchingMetaPTE);
- dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", st_vars->TimeForFetchingMetaPTE);
+ (double)LinesToRequestPrefetchPixelData * LineTime +
+ 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+ dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime);
+ (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * st_vars->LineTime -
- st_vars->TimeForFetchingMetaPTE - 2*st_vars->TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
- ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime - TWait - TCalc - *TSetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
+ TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
+ ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
PixelPTEBytesPerRow);
#endif
@@ -3941,7 +3988,7 @@ bool dml32_CalculatePrefetchSchedule(
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
- __func__, st_vars->dst_y_prefetch_equ);
+ __func__, dst_y_prefetch_equ);
#endif
}
@@ -3957,10 +4004,10 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
- (*DestinationLinesToRequestVMInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -3977,7 +4024,7 @@ bool dml32_CalculatePrefetchSchedule(
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
- (*DestinationLinesToRequestRowInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
@@ -4000,12 +4047,12 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
+ LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
@@ -4159,7 +4206,6 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
@@ -4221,15 +4267,37 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
double ActiveDRAMClockChangeLatencyMargin[])
{
unsigned int i, j, k;
-
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = 0;
- st_vars->DRAMClockChangeSupportNumber = 0;
- st_vars->DRAMClockChangeMethod = 0;
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
- st_vars->MinActiveFCLKChangeMargin = 0.;
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
- st_vars->TotalPixelBW = 0.0;
- st_vars->TotalActiveWriteback = 0;
+ unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
+ unsigned int DRAMClockChangeSupportNumber = 0;
+ unsigned int LastSurfaceWithoutMargin;
+ unsigned int DRAMClockChangeMethod = 0;
+ bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
+ double MinActiveFCLKChangeMargin = 0.;
+ double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
+ double ActiveClockChangeLatencyHidingY;
+ double ActiveClockChangeLatencyHidingC;
+ double ActiveClockChangeLatencyHiding;
+ double EffectiveDETBufferSizeY;
+ double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
+ double TotalPixelBW = 0.0;
+ bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double LinesInDETY[DC__NUM_DPP__MAX];
+ double LinesInDETC[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
+ double FullDETBufferingTimeY;
+ double FullDETBufferingTimeC;
+ double WritebackDRAMClockChangeLatencyMargin;
+ double WritebackFCLKChangeLatencyMargin;
+ double WritebackLatencyHiding;
+ bool SameTimingForFCLKChange;
+
+ unsigned int TotalActiveWriteback = 0;
+ unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
+ unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
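[annotation] The declaration block above shows the shape of the whole patch: fields that previously lived in a dummy-vars scratch struct threaded through as st_vars become ordinary function locals, with the per-surface arrays sized DC__NUM_DPP__MAX on the stack. In miniature, and with hypothetical names, the transformation is:

/* Before: callers pass a scratch struct through the call. */
struct calc_scratch {
        double total_pixel_bw;
        double margin[8];
};

static void calc_before(struct calc_scratch *st_vars)
{
        st_vars->total_pixel_bw = 0.0;
        st_vars->margin[0] = st_vars->total_pixel_bw;
}

/* After (this patch's pattern): plain locals, one fewer parameter. */
static void calc_after(void)
{
        double total_pixel_bw = 0.0;
        double margin[8];

        margin[0] = total_pixel_bw;
        (void)margin;
}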
Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
@@ -4261,13 +4329,13 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
- st_vars->TotalActiveWriteback = 0;
+ TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (WritebackEnable[k] == true)
- st_vars->TotalActiveWriteback = st_vars->TotalActiveWriteback + 1;
+ TotalActiveWriteback = TotalActiveWriteback + 1;
}
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
@@ -4277,7 +4345,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
@@ -4307,14 +4375,14 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalPixelBW = st_vars->TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- st_vars->LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
@@ -4325,72 +4393,72 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
#endif
- st_vars->EffectiveLBLatencyHidingY = st_vars->LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveLBLatencyHidingC = st_vars->LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveDETBufferSizeY = DETBufferSizeY[k];
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
- st_vars->EffectiveDETBufferSizeY = st_vars->EffectiveDETBufferSizeY
+ EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
* (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / st_vars->TotalPixelBW;
+ / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
}
- st_vars->LinesInDETY[k] = (double) st_vars->EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
- st_vars->LinesInDETYRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETY[k], SwathHeightY[k]);
- st_vars->FullDETBufferingTimeY = st_vars->LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->EffectiveLBLatencyHidingY + st_vars->FullDETBufferingTimeY
+ ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->ActiveClockChangeLatencyHidingY
+ ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
/ PixelClock[k] / VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- st_vars->LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
- st_vars->LinesInDETCRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETC[k], SwathHeightC[k]);
- st_vars->FullDETBufferingTimeC = st_vars->LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
+ LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
/ VRatioChroma[k];
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->EffectiveLBLatencyHidingC + st_vars->FullDETBufferingTimeC
+ ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
/ PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->ActiveClockChangeLatencyHidingC
+ ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
/ PixelClock[k] / VRatioChroma[k];
}
- st_vars->ActiveClockChangeLatencyHiding = dml_min(st_vars->ActiveClockChangeLatencyHidingY,
- st_vars->ActiveClockChangeLatencyHidingC);
+ ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
+ ActiveClockChangeLatencyHidingC);
} else {
- st_vars->ActiveClockChangeLatencyHiding = st_vars->ActiveClockChangeLatencyHidingY;
+ ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->DRAMClockChangeWatermark;
- st_vars->ActiveFCLKChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->FCLKChangeWatermark;
- st_vars->USRRetrainingLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
if (WritebackEnable[k]) {
- st_vars->WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
+ WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
/ (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
/ (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64)
- st_vars->WritebackLatencyHiding = st_vars->WritebackLatencyHiding / 2;
+ WritebackLatencyHiding = WritebackLatencyHiding / 2;
- st_vars->WritebackDRAMClockChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackDRAMClockChangeWatermark;
- st_vars->WritebackFCLKChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
- st_vars->WritebackFCLKChangeLatencyMargin);
- st_vars->ActiveFCLKChangeLatencyMargin[k] = dml_min(st_vars->ActiveFCLKChangeLatencyMargin[k],
- st_vars->WritebackDRAMClockChangeLatencyMargin);
+ WritebackFCLKChangeLatencyMargin);
+ ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
@@ -4409,41 +4477,41 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
(DRRDisplay[i] || DRRDisplay[j]))) {
- st_vars->SynchronizedSurfaces[i][j] = true;
+ SynchronizedSurfaces[i][j] = true;
} else {
- st_vars->SynchronizedSurfaces[i][j] = false;
+ SynchronizedSurfaces[i][j] = false;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (!st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] < st_vars->MinActiveFCLKChangeMargin)) {
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
- st_vars->MinActiveFCLKChangeMargin = st_vars->ActiveFCLKChangeLatencyMargin[k];
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = k;
+ (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
+ ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
+ FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
+ MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
+ SurfaceWithMinActiveFCLKChangeMargin = k;
}
}
- *MinActiveFCLKChangeLatencySupported = st_vars->MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
+ *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
- st_vars->SameTimingForFCLKChange = true;
+ SameTimingForFCLKChange = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!st_vars->SynchronizedSurfaces[k][st_vars->SurfaceWithMinActiveFCLKChangeMargin]) {
+ if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->SameTimingForFCLKChange ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] <
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = st_vars->ActiveFCLKChangeLatencyMargin[k];
+ (SameTimingForFCLKChange ||
+ ActiveFCLKChangeLatencyMargin[k] <
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
}
- st_vars->SameTimingForFCLKChange = false;
+ SameTimingForFCLKChange = false;
}
}
- if (st_vars->MinActiveFCLKChangeMargin > 0) {
+ if (MinActiveFCLKChangeMargin > 0) {
*FCLKChangeSupport = dm_fclock_change_vactive;
- } else if ((st_vars->SameTimingForFCLKChange || st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
+ } else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
(PrefetchMode <= 1)) {
*FCLKChangeSupport = dm_fclock_change_vblank;
} else {
@@ -4453,7 +4521,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*USRRetrainingSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->USRRetrainingLatencyMargin[k] < 0)) {
+ (USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
@@ -4464,42 +4532,42 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
- st_vars->DRAMClockChangeSupportNumber = 2;
- } else if (st_vars->DRAMClockChangeSupportNumber == 0) {
- st_vars->DRAMClockChangeSupportNumber = 1;
- st_vars->LastSurfaceWithoutMargin = k;
- } else if (st_vars->DRAMClockChangeSupportNumber == 1 &&
- !st_vars->SynchronizedSurfaces[st_vars->LastSurfaceWithoutMargin][k]) {
- st_vars->DRAMClockChangeSupportNumber = 2;
+ DRAMClockChangeSupportNumber = 2;
+ } else if (DRAMClockChangeSupportNumber == 0) {
+ DRAMClockChangeSupportNumber = 1;
+ LastSurfaceWithoutMargin = k;
+ } else if (DRAMClockChangeSupportNumber == 1 &&
+ !SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
+ DRAMClockChangeSupportNumber = 2;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
- st_vars->DRAMClockChangeMethod = 1;
+ DRAMClockChangeMethod = 1;
else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
- st_vars->DRAMClockChangeMethod = 2;
+ DRAMClockChangeMethod = 2;
}
- if (st_vars->DRAMClockChangeMethod == 0) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeMethod == 0) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
- } else if (st_vars->DRAMClockChangeMethod == 1) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ } else if (DRAMClockChangeMethod == 1) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
@@ -4513,7 +4581,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
- src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + st_vars->LBLatencyHidingSourceLinesY[k];
+ src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
@@ -4521,7 +4589,7 @@ dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DET
dml_print("DML::%s: k=%d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%d, SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
-dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, st_vars->LBLatencyHidingSourceLinesY[k]);
+dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
@@ -4532,7 +4600,7 @@ dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l
if (BytePerPixelDETC[k] > 0) {
src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
- src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + st_vars->LBLatencyHidingSourceLinesC[k];
+ src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
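[annotation] The MALL sizing at the end of the function sums, per plane, the lines scanned out during the p-state change, the lines already buffered ahead of the scanline (DET fill rounded down to a swath plus the line-buffer hiding computed earlier), and one meta row, then keeps the larger of luma and chroma. With hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* Hypothetical single-plane (luma) numbers. */
        unsigned int src_y_pstate_l = 24;  /* lines consumed during the switch */
        unsigned int src_y_ahead_l = 20;   /* DET lines + LB latency hiding */
        unsigned int meta_row_height = 8;
        unsigned int sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height;

        printf("SubViewportLinesNeededInMALL = %u\n", sub_vp_lines_l); /* 52 */
        return 0;
}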
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 37a314ce284b..d293856ba906 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,7 +30,6 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
-#include "dml/display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -82,7 +81,6 @@ void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
double *DPPCLKUsingSingleDPP);
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -362,7 +360,6 @@ void dml32_CalculateSurfaceSizeInMall(
bool *ExceededMALLSize);
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -715,7 +712,6 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -811,7 +807,6 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 84b4b00f29cb..c87091683b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -498,6 +498,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_21_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 8460aefe7b6d..492aec634b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -182,108 +182,6 @@ void Calculate256BBlockSizes(
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC);
-struct dml32_CalculateSwathAndDETConfiguration {
- unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
- unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpSwathSizeBytesY;
- unsigned int RoundedUpSwathSizeBytesC;
- double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
- double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
- unsigned int TotalActiveDPP;
- bool NoChromaSurfaces;
- unsigned int DETBufferSizeInKByteForSwathCalculation;
-};
-
-struct dml32_CalculateVMRowAndSwath {
- unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
- unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
- unsigned int PDEAndMetaPTEBytesFrameY;
- unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
- bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport {
- unsigned int SurfaceWithMinActiveFCLKChangeMargin;
- unsigned int DRAMClockChangeSupportNumber;
- unsigned int LastSurfaceWithoutMargin;
- unsigned int DRAMClockChangeMethod;
- bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin;
- double MinActiveFCLKChangeMargin;
- double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank;
- double ActiveClockChangeLatencyHidingY;
- double ActiveClockChangeLatencyHidingC;
- double ActiveClockChangeLatencyHiding;
- double EffectiveDETBufferSizeY;
- double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
- double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
- double TotalPixelBW;
- bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
- double EffectiveLBLatencyHidingY;
- double EffectiveLBLatencyHidingC;
- double LinesInDETY[DC__NUM_DPP__MAX];
- double LinesInDETC[DC__NUM_DPP__MAX];
- unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
- double FullDETBufferingTimeY;
- double FullDETBufferingTimeC;
- double WritebackDRAMClockChangeLatencyMargin;
- double WritebackFCLKChangeLatencyMargin;
- double WritebackLatencyHiding;
- bool SameTimingForFCLKChange;
- unsigned int TotalActiveWriteback;
- unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
- unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculatePrefetchSchedule {
- unsigned int DPPCycles, DISPCLKCycles;
- double DSTTotalPixelsAfterScaler;
- double LineTime;
- double dst_y_prefetch_equ;
- double prefetch_bw_oto;
- double Tvm_oto;
- double Tr0_oto;
- double Tvm_oto_lines;
- double Tr0_oto_lines;
- double dst_y_prefetch_oto;
- double TimeForFetchingMetaPTE;
- double TimeForFetchingRowInVBlank;
- double LinesToRequestPrefetchPixelData;
- unsigned int HostVMDynamicLevelsTrips;
- double trip_to_mem;
- double Tvm_trips;
- double Tr0_trips;
- double Tvm_trips_rounded;
- double Tr0_trips_rounded;
- double Lsw_oto;
- double Tpre_rounded;
- double prefetch_bw_equ;
- double Tvm_equ;
- double Tr0_equ;
- double Tdmbf;
- double Tdmec;
- double Tdmsks;
- double prefetch_sw_bytes;
- double bytes_pp;
- double dep_bytes;
- unsigned int max_vratio_pre;
- double min_Lsw;
- double Tsw_est1;
- double Tsw_est3;
-};
-
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation {
unsigned int dummy_integer_array[2][DC__NUM_DPP__MAX];
double dummy_single_array[2][DC__NUM_DPP__MAX];
@@ -355,10 +253,6 @@ struct dummy_vars {
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation;
struct dml32_ModeSupportAndSystemConfigurationFull dml32_ModeSupportAndSystemConfigurationFull;
- struct dml32_CalculateSwathAndDETConfiguration dml32_CalculateSwathAndDETConfiguration;
- struct dml32_CalculateVMRowAndSwath dml32_CalculateVMRowAndSwath;
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport;
- struct dml32_CalculatePrefetchSchedule dml32_CalculatePrefetchSchedule;
};
struct vba_vars_st {
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index ab06c7fc7452..9f3558c0ef11 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -244,13 +244,15 @@ enum {
#define ASICREV_IS_GC_10_3_7(eChipRev) ((eChipRev >= GC_10_3_7_A0) && (eChipRev < GC_10_3_7_UNKNOWN))
#define AMDGPU_FAMILY_GC_11_0_0 145
-#define AMDGPU_FAMILY_GC_11_0_2 148
+#define AMDGPU_FAMILY_GC_11_0_1 148
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
+#define GC_11_0_3_A0 0x20
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
-#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
+#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
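[annotation] The revision macros carve eChipRev into half-open windows: introducing GC_11_0_3_A0 (0x20) shrinks the GC 11.0.2 window to [0x10, 0x20) and gives 11.0.3 the range [0x20, GC_11_UNKNOWN). A hypothetical check mirroring the new macros:

/* Half-open revision windows, as in ASICREV_IS_GC_11_0_2/_3 above. */
static int is_gc_11_0_2(unsigned int rev)
{
        return rev >= 0x10 && rev < 0x20;  /* [GC_11_0_2_A0, GC_11_0_3_A0) */
}

static int is_gc_11_0_3(unsigned int rev)
{
        return rev >= 0x20 && rev < 0xFF;  /* [GC_11_0_3_A0, GC_11_UNKNOWN) */
}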
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f093b49c5e6e..3bf08a60c45c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -119,13 +119,15 @@ enum dc_log_type {
LOG_HDMI_RETIMER_REDRIVER,
LOG_DSC,
LOG_SMU_MSG,
+ LOG_DC2RESERVED4,
+ LOG_DC2RESERVED5,
LOG_DWB,
LOG_GAMMA_DEBUG,
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
LOG_DP2,
- LOG_SECTION_TOTAL_COUNT
+ LOG_DC2RESERVED12,
};
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index da09ba7589f7..0f39ab9dc5b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -613,10 +613,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
-
- /* FreeSync HDR */
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
@@ -684,10 +680,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
/* PB16 : Reserved bits 7:1, FixedRate bit 0 */
infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
-
- //FreeSync HDR
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
@@ -772,8 +764,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
infopacket->hb2 = 0x09;
- *payload_size = 0x0A;
-
+ *payload_size = 0x09;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
@@ -822,9 +813,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal,
infopacket->hb1 = version;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */
- *payload_size = 0x10;
- infopacket->hb2 = *payload_size - 1; //-1 for checksum
+ infopacket->hb2 = 0x10;
+ *payload_size = 0x10;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
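[annotation] The header fixes above keep hb2 (the stated payload length) and *payload_size consistent: the v2 HDMI header now reports 0x09 for both, and v3 sets hb2 = 0x10 directly instead of deriving it as *payload_size - 1. For background, HDMI infoframe checksums are defined so that header, payload, and checksum bytes sum to zero modulo 256; a generic sketch, not this module's helper:

/* Generic HDMI infoframe checksum: all header and payload bytes plus the
 * checksum byte itself must sum to 0 (mod 256). */
static unsigned char infoframe_checksum(const unsigned char *hdr, int hdr_len,
                                        const unsigned char *pb, int pb_len)
{
        unsigned int sum = 0;
        int i;

        for (i = 0; i < hdr_len; i++)
                sum += hdr[i];
        for (i = 0; i < pb_len; i++)
                sum += pb[i];
        return (unsigned char)(0x100 - (sum & 0xff));
}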
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
index 2ed95790a600..cf8d60c4df1b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
@@ -15243,6 +15243,8 @@
#define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX 5
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS 0x420186
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 5
+#define regBIF0_PCIE_TX_POWER_CTRL_1 0x420187
+#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF0_PCIE_TX_CTRL_4 0x42018b
#define regBIF0_PCIE_TX_CTRL_4_BASE_IDX 5
#define regBIF0_PCIE_TX_STATUS 0x420194
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
index eb62a18fcc48..3d60c9e92548 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
@@ -85627,6 +85627,19 @@
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
+//BIF0_PCIE_TX_POWER_CTRL_1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT 0x0
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT 0x1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT 0x2
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT 0x3
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT 0x4
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT 0x5
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK 0x00000002L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK 0x00000004L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK 0x00000010L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK 0x00000020L
//BIF0_PCIE_TX_CTRL_4
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 78620b0bd279..f745cd8f1ab7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x23
-
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x22
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -1193,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[61];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent;
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t TempPadding;
uint8_t PcieRate ;
uint8_t PcieWidth ;
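[annotation] Firmware-shared tables like these keep their total size fixed: the 12 bytes of new board-power fields (one support byte, three padding bytes, four int16 coefficients) are paid for by shrinking the reserved array from Spare[64] to Spare[61], exactly three uint32 words. A sketch of how such an invariant can be pinned down at compile time, with hypothetical types rather than the real PMFW structs:

#include <stdint.h>

struct sku_before {
        uint32_t spare[64];
};

struct sku_after {
        uint8_t  total_board_power_support;
        uint8_t  total_board_power_padding[3];
        int16_t  total_idle_board_power_m;
        int16_t  total_idle_board_power_b;
        int16_t  total_board_power_m;
        int16_t  total_board_power_b;
        uint32_t spare[61];
};

/* The 12 new bytes replace three spare words, so the size is unchanged. */
_Static_assert(sizeof(struct sku_before) == sizeof(struct sku_after),
               "PMFW table size must not change");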
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 76f695a1d065..ae2d337158f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
typedef struct {
int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+ uint16_t spare2;
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index c02e5e576728..ac308e72241a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,9 +28,9 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
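These constants are the interface versions the driver was built against; at boot the driver compares them with the version the PMFW reports, since the shared struct layouts are only meaningful when both sides agree. A standalone sketch of that handshake, with the function name and the warn-vs-fail behavior as assumptions (the real drivers differ by generation):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch: compare the IF version compiled into the driver
 * (e.g. 0x2E for SMU v13.0.0 after this change) with the version the
 * firmware reports at boot. */
static int check_driver_if_version(uint32_t fw_ver, uint32_t drv_ver)
{
	if (fw_ver == drv_ver)
		return 0;

	fprintf(stderr, "SMU driver-IF mismatch: fw 0x%02x, driver 0x%02x\n",
		fw_ver, drv_ver);
	return -1;
}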
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index fa520d79ef67..6db67f082d91 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4283,6 +4283,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e8fe84f806d1..18ee3b5e64c5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -212,6 +212,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@@ -219,16 +222,10 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3667)
- pptable_id = 36671;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3688)
- pptable_id = 36881;
/*
* Temporary solution for SMU V13.0.0 with SCPM enabled:
* - use 36831 signed pptable when pp_table_id is 3683
+ * - use 37151 signed pptable when pp_table_id is 3715
* - use 36641 signed pptable when pp_table_id is 3664 or 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -241,6 +238,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
case 3683:
pptable_id = 36831;
break;
+ case 3715:
+ pptable_id = 37151;
+ break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
return -EINVAL;
@@ -478,7 +478,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
/*
* Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664 or 3683 on request
+ * - use 3664, 3683 or 3715 on request
* - use 3664 when pptable_id is 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -489,6 +489,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
break;
case 3664:
case 3683:
+ case 3715:
break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
@@ -2344,8 +2345,8 @@ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
-
- return smu_cmn_send_msg_without_waiting(smu, index, 0);
+ /* Pass param 1 to tell PMFW to enable the GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1bbeceeb9e3c..df4a47acd724 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1792,7 +1792,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 82d3718d8324..97e1d55dcaad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -71,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
@@ -199,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -226,18 +228,6 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- /* allow message will be sent after enable message */
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
- if (ret)
- dev_err(adev->dev, "Failed to Enable GfxOff!\n");
- return ret;
-}
-
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1026,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
- .post_init = smu_v13_0_4_post_smu_init,
.mode2_reset = smu_v13_0_4_mode2_reset,
.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 47360ef5c175..66445964efbd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -176,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9dd56e73218b..1016d1c216d8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1567,6 +1567,16 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return true;
+}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1574,7 +1584,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
@@ -1624,6 +1636,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
};
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 702ea803a743..39e7004de720 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -180,7 +180,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
of_node_put(bus_node);
if (ret == -ENODEV) {
dev_warn(dev, "missing 'data-mapping' DT property\n");
- } else if (ret) {
+ } else if (ret < 0) {
dev_err(dev, "invalid 'data-mapping' DT property\n");
return ret;
} else {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 86d670c71286..ad068865ba20 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a3f180653b8b..eb09e86044c6 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
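The rework keys removal on the GEM handle instead of the dma_buf pointer, so the release path no longer needs the obj->dma_buf back-reference that the deleted helper relied on. A simplified sketch of the lookup, using a plain binary search tree in place of the kernel rb-tree:

#include <stdint.h>
#include <stddef.h>

/* Each tracked export sits in two search trees: one sorted by dma_buf
 * pointer, one sorted by handle. Removal must erase the member from
 * both and then drop the dma_buf reference, exactly as the new
 * drm_prime_remove_buf_handle() above does. */
struct member {
	uint32_t handle;
	struct member *left, *right;	/* handle-sorted links */
};

static struct member *find_by_handle(struct member *root, uint32_t handle)
{
	while (root) {
		if (root->handle == handle)
			return root;
		root = (root->handle < handle) ? root->right : root->left;
	}
	return NULL;	/* not exported via PRIME: nothing to remove */
}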
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ccec4055fde3..389e9f157ca5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- assert_object_held(obj);
+ assert_object_held_shared(obj);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
continue;
}
- if (!i915_gem_object_trylock(obj, NULL)) {
- /* busy, toss it back to the pile */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
- continue;
- }
-
__i915_gem_object_pages_fini(obj);
- i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work.work);
+ container_of(work, struct drm_i915_private, mm.free_work);
i915_gem_flush_free_objects(i915);
}
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+ queue_work(i915->wq, &i915->mm.free_work);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -745,7 +737,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5cf36a130061..9f6b14ec189a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
-#define I915_BO_WAS_BOUND_BIT 10
/**
* @mem_flags - Mutable placement-related flags
*
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
+
+ u32 tlb;
} mm;
struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 97c820eee115..8357dbdcab5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,14 +6,15 @@
#include <drm/drm_cache.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
-#include "gt/intel_gt.h"
-
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
vunmap(ptr);
}
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!obj->mm.tlb)
+ return;
+
+ intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+ obj->mm.tlb = 0;
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
- if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
- intel_gt_invalidate_tlbs(to_gt(i915));
- }
+ flush_tlb_invalidate(obj);
return pages;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 68c2b0d8f187..f435e06125aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -11,7 +11,9 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
@@ -36,8 +38,6 @@ static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);
- mutex_init(&gt->tlb_invalidate_lock);
-
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -48,6 +48,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
@@ -768,6 +770,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
+ mutex_destroy(&gt->tlb.invalidate_lock);
intel_engines_free(gt);
}
}
@@ -906,7 +909,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
return rb;
}
-void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+static void mmio_invalidate_full(struct intel_gt *gt)
{
static const i915_reg_t gen8_regs[] = {
[RENDER_CLASS] = GEN8_RTCR,
@@ -924,13 +927,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
- return;
-
if (GRAPHICS_VER(i915) == 12) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
@@ -945,28 +946,41 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
"Platform does not implement TLB invalidation!"))
return;
- GEM_TRACE("\n");
-
- assert_rpm_wakelock_held(&i915->runtime_pm);
-
- mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ awake = 0;
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ awake |= engine->mask;
}
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
spin_unlock_irq(&uncore->lock);
- for_each_engine(engine, gt, id) {
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ struct reg_and_bit rb;
+
/*
* HW architecture suggests a typical invalidation time of 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -974,12 +988,8 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
*/
const unsigned int timeout_us = 100;
const unsigned int timeout_ms = 4;
- struct reg_and_bit rb;
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
@@ -996,5 +1006,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
- mutex_unlock(&gt->tlb_invalidate_lock);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
}
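tlb_seqno_passed() leans on a few conventions: unbinds record an odd seqno (see intel_gt_next_invalidate_tlb_full() below), full invalidations complete on even values, and the unsigned subtraction followed by a signed cast keeps the comparison valid across u32 wraparound. A standalone demonstration of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define ALIGN_EVEN(x) (((x) + 1u) & ~1u)	/* mirrors ALIGN(seqno, 2) */

/* Standalone analogue of tlb_seqno_passed(): true once a *full*
 * invalidation (even seqno) newer than the recorded unbind seqno has
 * completed. */
static int seqno_passed(uint32_t cur, uint32_t recorded)
{
	return (int32_t)(cur - ALIGN_EVEN(recorded)) > 0;
}

int main(void)
{
	assert(!seqno_passed(100, 101));	/* barrier 102 not yet done */
	assert(seqno_passed(104, 101));		/* a full barrier has passed */
	assert(seqno_passed(4, 0xfffffffbu));	/* still correct across wrap */
	return 0;
}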
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 82d6f248d876..40b06adf509a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
-void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index bc898df7a48c..a334787a4939 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,9 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
+#define with_intel_gt_pm_if_awake(gt, wf) \
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
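with_intel_gt_pm_if_awake() above follows the common kernel idiom of scoping a resource with a for-loop: acquire in the initializer, run the body at most once (zero times if acquisition fails), release in the increment clause. A generic standalone illustration of the pattern, with stand-in acquire/release functions:

#include <stdio.h>

static int try_get(void) { return 1; }	/* stand-in for _get_if_awake() */
static void put(void) { puts("released"); }

/* 'wf' doubles as the loop condition, so a failed acquire skips the
 * body entirely and a successful one runs it exactly once. */
#define with_resource(wf) \
	for ((wf) = try_get(); (wf); put(), (wf) = 0)

int main(void)
{
	int wf;

	with_resource(wf)
		puts("holding resource");
	return 0;
}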
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index df708802889d..3804a583382b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -11,6 +11,7 @@
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -83,7 +84,22 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gsc gsc;
- struct mutex tlb_invalidate_lock;
+ struct {
+ /* Serialize global tlb invalidations */
+ struct mutex invalidate_lock;
+
+ /*
+ * Batch TLB invalidations
+ *
+ * After unbinding the PTE, we need to ensure the TLBs
+ * are invalidated prior to releasing the physical pages.
+ * But we only need one such invalidation for all unbinds,
+ * so we track how many TLB invalidations have been
+ * performed since unbinding the PTE and only emit an extra
+ * invalidate if no full barrier has been passed.
+ */
+ seqcount_mutex_t seqno;
+ } tlb;
struct i915_wa_list wa_list;
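Pairing the counter with the mutex via seqcount_mutex_t means writers (full invalidations) are serialized by gt->tlb.invalidate_lock while readers such as intel_gt_tlb_seqno() merely sample the counter without taking the lock. A rough standalone analogue using C11 atomics, assuming the sequence advances by two per completed invalidation so finished barriers stay even:

#include <stdatomic.h>

static atomic_uint tlb_seqno;	/* even while idle; see the alignment trick above */

/* Writer side: called with the invalidate mutex held, after the MMIO
 * invalidation has completed. */
static void note_full_invalidate_done(void)
{
	atomic_fetch_add_explicit(&tlb_seqno, 2, memory_order_release);
}

/* Reader side: lock-free; unbind paths use this to record which barrier
 * they still need. */
static unsigned int current_tlb_seqno(void)
{
	return atomic_load_explicit(&tlb_seqno, memory_order_acquire);
}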
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2c35324b5f68..2b10b96b17b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -708,7 +708,7 @@ intel_context_migrate_copy(struct intel_context *ce,
u8 src_access, dst_access;
struct i915_request *rq;
int src_sz, dst_sz;
- bool ccs_is_src;
+ bool ccs_is_src, overwrite_ccs;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +749,8 @@ intel_context_migrate_copy(struct intel_context *ce,
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}
+ overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -852,6 +854,25 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
+ } else if (overwrite_ccs) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * While we can't always restore/manage the CCS state,
+ * we still need to ensure we don't leak the CCS state
+ * from the previous user, so make sure we overwrite it
+ * with something.
+ */
+ err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
}
/* Arbitration is re-enabled between requests. */
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d8b94d638559..6ee8d1127016 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -206,8 +206,12 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
- if (vma_res->allocated)
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (!vma_res->allocated)
+ return;
+
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (vma_res->tlb)
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6e90032e12e9..aa6aed837194 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+#ifdef CONFIG_64BIT
static void _release_bars(struct pci_dev *pdev)
{
int resno;
@@ -111,6 +112,9 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
}
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
static int
region_lmem_release(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d25647be25d1..086bbe8945d6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -247,7 +247,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct delayed_work free_work;
+ struct work_struct free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1378,7 +1378,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_delayed_work(&i915->mm.free_work);
+ flush_work(&i915->mm.free_work);
flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ef3b04c7e153..260371716490 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -538,8 +538,6 @@ int i915_vma_bind(struct i915_vma *vma,
bind_flags);
}
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
-
atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -1310,6 +1308,19 @@ err_unpin:
return err;
}
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+{
+ /*
+ * Before we release the pages that were bound by this vma, we
+ * must invalidate all the TLBs that may still have a reference
+ * back to our physical address. It only needs to be done once,
+ * so after updating the PTE to point away from the pages, record
+ * the most recent TLB invalidation seqno, and if we have not yet
+ * flushed the TLBs upon release, perform a full invalidation.
+ */
+ WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+}
+
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
@@ -1941,7 +1952,12 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
- unbind_fence = i915_vma_resource_unbind(vma_res);
+ if (async)
+ unbind_fence = i915_vma_resource_unbind(vma_res,
+ &vma->obj->mm.tlb);
+ else
+ unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
+
vma->resource = NULL;
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
@@ -1949,10 +1965,13 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
i915_vma_detach(vma);
- if (!async && unbind_fence) {
- dma_fence_wait(unbind_fence, false);
- dma_fence_put(unbind_fence);
- unbind_fence = NULL;
+ if (!async) {
+ if (unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+ vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 88ca0bd9c900..33a58f605d75 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -213,6 +213,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 27c55027387a..5a67995ea5fe 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -223,10 +223,13 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
* Return: A refcounted pointer to a dma-fence that signals when unbinding is
* complete.
*/
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb)
{
struct i915_address_space *vm = vma_res->vm;
+ vma_res->tlb = tlb;
+
/* Reference for the sw fence */
i915_vma_resource_get(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 5d8427caa2ba..06923d1816e7 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -67,6 +67,7 @@ struct i915_page_sizes {
* taken when the unbind is scheduled.
* @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
* needs to be skipped for unbind.
+ * @tlb: pointer to obj->mm.tlb for an async unbind, otherwise NULL
*
* The lifetime of a struct i915_vma_resource is from a binding request to
* the actual possible asynchronous unbind has completed.
@@ -119,6 +120,8 @@ struct i915_vma_resource {
bool immediate_unbind:1;
bool needs_wakeref:1;
bool skip_pte_rewrite:1;
+
+ u32 *tlb;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
@@ -131,7 +134,8 @@ struct i915_vma_resource *i915_vma_resource_alloc(void);
void i915_vma_resource_free(struct i915_vma_resource *vma_res);
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb);
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 9b84df34a6a1..8cf3352d8858 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
drm_kms_helper_poll_init(drm);
- drm_bridge_connector_enable_hpd(kms->connector);
-
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_crtc;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 1b70938cfd2c..bd4ca11d3ff5 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -115,8 +115,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 05076e530e7d..e29175e4b44c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -820,6 +820,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+ * Without this the operation can time out and we'll fall back to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 568182e68dd7..d8cf71fb0512 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2605,6 +2605,27 @@ nv172_chipset = {
};
static const struct nvkm_device_chip
+nv173_chipset = {
+ .name = "GA103",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.bar = { 0x00000001, tu102_bar_new },
@@ -3067,6 +3088,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2b12389f841a..ee0165687239 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1605,6 +1605,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index b4dfa166eccd..34234a144e87 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* (4 bytes). Its minimal size is therefore 10 bytes
*/
#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* therefore 6 bytes
*/
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* 16 bytes
*/
#define HFP_PACKET_OVERHEAD 16
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
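The type change matters because each (timing) * Bpp - OVERHEAD term can legitimately go negative; with an unsigned Bpp the expression was computed in unsigned arithmetic and wrapped to a huge value, so max() never selected the overhead floor (and needed the casts besides, since the kernel's max() rejects mismatched types). A standalone demonstration of the wrap:

#include <stdio.h>

#define HSA_PACKET_OVERHEAD 10

int main(void)
{
	int sync_len = 2;	/* short sync pulse: payload < overhead */
	unsigned int u_bpp = 3;
	int s_bpp = 3;

	/* Unsigned: 2 * 3 - 10 wraps to 4294967292, so the wrapped value
	 * always wins a max() against the overhead floor. */
	unsigned int u = sync_len * u_bpp - HSA_PACKET_OVERHEAD;

	/* Signed: 2 * 3 - 10 == -4, and max(10, -4) correctly picks 10. */
	int s = sync_len * s_bpp - HSA_PACKET_OVERHEAD;

	printf("unsigned: %u  signed: %d\n", u, s);
	return 0;
}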
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e210df65c30..97184c333526 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -912,7 +912,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 061be9a6619d..b0f3117102ca 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -8,6 +8,7 @@ config DRM_VC4
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
+ depends on PM
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 592c3b5d03e6..1e5f68704d7d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2855,7 +2855,7 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
+static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
@@ -2972,17 +2972,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
+ pm_runtime_enable(dev);
+
/*
- * We need to have the device powered up at this point to call
- * our reset hook and for the CEC init.
+ * We need to have the device powered up at this point to call
+ * our reset hook and for the CEC init.
*/
- ret = vc4_hdmi_runtime_resume(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
- goto err_put_ddc;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ goto err_disable_runtime_pm;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -3028,6 +3026,7 @@ err_destroy_conn:
err_destroy_encoder:
drm_encoder_cleanup(encoder);
pm_runtime_put_sync(dev);
+err_disable_runtime_pm:
pm_runtime_disable(dev);
err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
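The bind path now enables runtime PM before powering the device, and pm_runtime_resume_and_get() replaces the hand-rolled resume plus get_noresume/set_active dance, which also gives the error path a single symmetric unwind label. A minimal kernel-style sketch of the pattern, not vc4's actual bind flow:

#include <linux/pm_runtime.h>

static int example_bind(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);

	/* Resume and take a usage reference in one call. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		goto err_disable;

	/* ... initialization that requires the device powered ... */

	pm_runtime_put(dev);
	return 0;

err_disable:
	pm_runtime_disable(dev);
	return ret;
}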
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 78fb1a4274a6..e47fa3465671 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1572,9 +1572,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
hrtimer_cancel(&i2c_imx->slave_timer);
@@ -1585,17 +1583,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- /* setup chip registers to defaults */
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ if (ret == 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ clk_disable(i2c_imx->clk);
+ }
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
- clk_disable_unprepare(i2c_imx->clk);
+
+ clk_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 79798fc7462a..6746aa46d96c 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -30,7 +30,7 @@ struct acpi_smbus_cmi {
u8 cap_info:1;
u8 cap_read:1;
u8 cap_write:1;
- const struct smbus_methods_t *methods;
+ struct smbus_methods_t *methods;
};
static const struct smbus_methods_t smbus_methods = {
@@ -361,6 +361,7 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
+ const struct acpi_device_id *id;
int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
@@ -368,7 +369,6 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
return -ENOMEM;
smbus_cmi->handle = device->handle;
- smbus_cmi->methods = device_get_match_data(&device->dev);
strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
device->driver_data = smbus_cmi;
@@ -376,6 +376,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->cap_read = 0;
smbus_cmi->cap_write = 0;
+ for (id = acpi_smbus_cmi_ids; id->id[0]; id++)
+ if (!strcmp(id->id, acpi_device_hid(device)))
+ smbus_cmi->methods =
+ (struct smbus_methods_t *) id->driver_data;
+
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index fce80a4a5147..04c04e6d24c3 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
struct scatterlist *sg;
unsigned long start, end, cur = 0;
unsigned int nmap = 0;
+ long ret;
int i;
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ wait_fence:
* may be not up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
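The fix is needed because dma_resv_wait_timeout() follows the jiffies-wait convention: negative errno on error, 0 on timeout, remaining time on success. Returning it straight from an int-returning function reported a timeout as 0 ("success") and truncated large positive remaining-time values. A standalone sketch of the fold the hunk performs:

#include <errno.h>

/* Map a jiffies-style wait result onto a plain errno-style int. */
static int wait_ret_to_errno(long ret)
{
	if (ret < 0)
		return (int)ret;	/* genuine error: pass through */
	if (ret == 0)
		return -ETIMEDOUT;	/* wait expired */
	return 0;			/* fence signalled in time */
}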
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c16017f6e8db..14392c942f49 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN_V(1);
}
- skb_get(skb);
- rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
- skb_trim(skb, sizeof(*rpl5));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
- if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 72f08171a28a..bc3ec22a62c5 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -407,7 +407,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
to_erdma_access_flags(reg_wr(send_wr)->access);
regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
regmr_sge->length = cpu_to_le32(mr->ibmr.length);
- regmr_sge->stag = cpu_to_le32(mr->ibmr.lkey);
+ regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index a7a3d42e2016..699bd3f59cd3 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -280,7 +280,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
attr->vendor_part_id = dev->pdev->device;
attr->hw_ver = dev->pdev->revision;
- attr->max_qp = dev->attrs.max_qp;
+ attr->max_qp = dev->attrs.max_qp - 1;
attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
attr->max_qp_rd_atom = dev->attrs.max_ord;
attr->max_qp_init_rd_atom = dev->attrs.max_ird;
@@ -291,7 +291,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_send_sge = dev->attrs.max_send_sge;
attr->max_recv_sge = dev->attrs.max_recv_sge;
attr->max_sge_rd = dev->attrs.max_sge_rd;
- attr->max_cq = dev->attrs.max_cq;
+ attr->max_cq = dev->attrs.max_cq - 1;
attr->max_cqe = dev->attrs.max_cqe;
attr->max_mr = dev->attrs.max_mr;
attr->max_pd = dev->attrs.max_pd;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a174a0eee8dc..fc94a1b25485 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
- dev->port_caps[port - 1].has_smi = false;
- if (MLX5_CAP_GEN(dev->mdev, port_type) ==
- MLX5_CAP_PORT_TYPE_IB) {
- if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port, 0,
- &vport_ctx);
- if (err) {
- mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
- port, err);
- return err;
- }
- dev->port_caps[port - 1].has_smi =
- vport_ctx.has_smi;
- } else {
- dev->port_caps[port - 1].has_smi = true;
- }
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
}
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
}
+
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index bd5f3b5e1727..7b83f48f60c5 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iscsi_hdr *hdr;
char *data;
int length;
+ bool full_feature_phase;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
hdr = desc->rsp + sizeof(struct iser_ctrl);
data = desc->rsp + ISER_HEADERS_LEN;
length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- if (iser_conn->iscsi_conn->session->discovery_sess)
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
return;
/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
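ISCSI_FULL_FEATURE_PHASE is a multi-bit stage value, so the new check compares the masked field for equality rather than testing individual bits, and additionally requires the final flag. A standalone illustration of the masked-equality test (flag values illustrative, not the real iSCSI definitions):

#include <assert.h>
#include <stdint.h>

#define FULL_FEATURE_PHASE 0x3	/* illustrative two-bit stage field */
#define FLAG_CMD_FINAL     0x80

/* '(flags & MASK) == MASK' requires *all* bits of the stage field;
 * a plain '(flags & MASK)' would also accept partial stages. */
static int in_full_feature_phase(uint8_t flags)
{
	return ((flags & FULL_FEATURE_PHASE) == FULL_FEATURE_PHASE) &&
	       (flags & FLAG_CMD_FINAL);
}

int main(void)
{
	assert(!in_full_feature_phase(0x81));	/* partial stage bits */
	assert(!in_full_feature_phase(0x03));	/* stage ok, not final */
	assert(in_full_feature_phase(0x83));
	return 0;
}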
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 51bd66a45a11..e190bb8c225c 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -68,7 +68,6 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
- struct irq_desc *desc;
int ret = 0;
if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
@@ -90,8 +89,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
* Hyper-V IO APIC irq affinity should be in the scope of
* ioapic_max_cpumask because there is no irq remapping support.
*/
- desc = irq_data_to_desc(irq_data);
- cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+ irq_data_update_affinity(irq_data, &ioapic_max_cpumask);
return 0;
}
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 327f3ab62c03..741612ba6a52 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -129,7 +129,7 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP);
- cpuintc_handle = irq_domain_alloc_fwnode(NULL);
+ cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 80d8ca6f2d46..16e9af8d8b1e 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -111,11 +111,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
/* Mask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
/* Set route for target vector */
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
/* Unmask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -286,7 +290,7 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
}
}
-struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
int i;
@@ -344,7 +348,8 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
if (!priv)
return -ENOMEM;
- priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
+ priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+ acpi_eiointc->node);
if (!priv->domain_handle) {
pr_err("Unable to allocate domain handle\n");
goto out_free_priv;
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index c4f3c886ad61..0da8716f8f24 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -207,7 +207,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
"reg-names", core_reg_names[i]);
if (index < 0)
- return -EINVAL;
+ goto out_iounmap;
priv->core_isr[i] = of_iomap(node, index);
}
@@ -360,7 +360,7 @@ int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic
parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index d0e8551bebfa..a72ede90ffc6 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -282,7 +282,7 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
int ret;
struct fwnode_handle *domain_handle;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
acpi_pchmsi->count, parent, domain_handle);
if (ret < 0)
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index b6f1392964b1..c01b9c257005 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -48,25 +48,6 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-int find_pch_pic(u32 gsi)
-{
- int i;
-
- /* Find the PCH_PIC that manages this GSI. */
- for (i = 0; i < MAX_IO_PICS; i++) {
- struct pch_pic *priv = pch_pic_priv[i];
-
- if (!priv)
- return -1;
-
- if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
- return i;
- }
-
- pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
- return -1;
-}
-
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -325,6 +306,25 @@ IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
#endif
#ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+ int i;
+
+ /* Find the PCH_PIC that manages this GSI. */
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ struct pch_pic *priv = pch_pic_priv[i];
+
+ if (!priv)
+ return -1;
+
+ if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+ return i;
+ }
+
+ pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+ return -1;
+}
+
static int __init
pch_lpc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
@@ -349,7 +349,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index afaf36b2f6ab..729be2c5296c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5620,6 +5620,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
* removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
+ flush_workqueue(md_rdev_misc_wq);
mutex_lock(&disks_mutex);
mddev = mddev_alloc(dev);
@@ -6238,11 +6239,11 @@ static void mddev_detach(struct mddev *mddev)
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
+ md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
if (mddev->event_work.func)
flush_workqueue(md_misc_wq);
- md_bitmap_destroy(mddev);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
@@ -6260,6 +6261,7 @@ void md_stop(struct mddev *mddev)
/* stop the array and free any attached data structures.
* This is called from dm-raid
*/
+ __md_stop_writes(mddev);
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9117fcdee1be..64d6e4cd8a3a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2639,18 +2639,18 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, enum req_op op)
{
sector_t first_bad;
int bad_sectors;
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
- && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
/* success */
return 1;
- if (rw == WRITE) {
+ if (op == REQ_OP_WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
@@ -2780,7 +2780,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage, WRITE)
+ s, conf->tmppage, REQ_OP_WRITE)
== 0) {
/* Well, this device is dead */
pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
@@ -2814,8 +2814,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage,
- READ)) {
+ s, conf->tmppage, REQ_OP_READ)) {
case 0:
/* Well, this device is dead */
pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
diff --git a/drivers/memory/dfl-emif.c b/drivers/memory/dfl-emif.c
index 3f719816771d..da06cd30a016 100644
--- a/drivers/memory/dfl-emif.c
+++ b/drivers/memory/dfl-emif.c
@@ -24,11 +24,24 @@
#define EMIF_STAT_CLEAR_BUSY_SFT 16
#define EMIF_CTRL 0x10
#define EMIF_CTRL_CLEAR_EN_SFT 0
-#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(3, 0)
+#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(7, 0)
#define EMIF_POLL_INVL 10000 /* us */
#define EMIF_POLL_TIMEOUT 5000000 /* us */
+/*
+ * The Capability Register replaces the Control Register (at the same
+ * offset) for EMIF feature revisions > 0. The bitmask that indicates
+ * the presence of memory channels exists in both the Capability Register
+ * and Control Register definitions. These can be thought of as a C union.
+ * The Capability Register definitions are used to check for the existence
+ * of a memory channel, and the Control Register definitions are used for
+ * managing the memory-clear functionality in revision 0.
+ */
+#define EMIF_CAPABILITY_BASE 0x10
+#define EMIF_CAPABILITY_CHN_MSK_V0 GENMASK_ULL(3, 0)
+#define EMIF_CAPABILITY_CHN_MSK GENMASK_ULL(7, 0)
+
struct dfl_emif {
struct device *dev;
void __iomem *base;
@@ -106,16 +119,30 @@ emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 0);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 1);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 2);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 3);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 4);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 5);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 6);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 7);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 0);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 1);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 2);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 3);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 4);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 5);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 6);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 7);
+
emif_clear_attr(0);
emif_clear_attr(1);
emif_clear_attr(2);
emif_clear_attr(3);
+emif_clear_attr(4);
+emif_clear_attr(5);
+emif_clear_attr(6);
+emif_clear_attr(7);
+
static struct attribute *dfl_emif_attrs[] = {
&emif_attr_inf0_init_done.attr.attr,
@@ -134,6 +161,22 @@ static struct attribute *dfl_emif_attrs[] = {
&emif_attr_inf3_cal_fail.attr.attr,
&emif_attr_inf3_clear.attr.attr,
+ &emif_attr_inf4_init_done.attr.attr,
+ &emif_attr_inf4_cal_fail.attr.attr,
+ &emif_attr_inf4_clear.attr.attr,
+
+ &emif_attr_inf5_init_done.attr.attr,
+ &emif_attr_inf5_cal_fail.attr.attr,
+ &emif_attr_inf5_clear.attr.attr,
+
+ &emif_attr_inf6_init_done.attr.attr,
+ &emif_attr_inf6_cal_fail.attr.attr,
+ &emif_attr_inf6_clear.attr.attr,
+
+ &emif_attr_inf7_init_done.attr.attr,
+ &emif_attr_inf7_cal_fail.attr.attr,
+ &emif_attr_inf7_clear.attr.attr,
+
NULL,
};
@@ -143,15 +186,24 @@ static umode_t dfl_emif_visible(struct kobject *kobj,
struct dfl_emif *de = dev_get_drvdata(kobj_to_dev(kobj));
struct emif_attr *eattr = container_of(attr, struct emif_attr,
attr.attr);
+ struct dfl_device *ddev = to_dfl_dev(de->dev);
u64 val;
/*
- * This device supports upto 4 memory interfaces, but not all
+ * This device supports up to 8 memory interfaces, but not all
* interfaces are used on different platforms. The read out value of
- * CLEAN_EN field (which is a bitmap) could tell how many interfaces
- * are available.
+ * CAPABILITY_CHN_MSK field (which is a bitmap) indicates which
+ * interfaces are available.
*/
- val = FIELD_GET(EMIF_CTRL_CLEAR_EN_MSK, readq(de->base + EMIF_CTRL));
+ if (ddev->revision > 0 && strstr(attr->name, "_clear"))
+ return 0;
+
+ if (ddev->revision == 0)
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK_V0,
+ readq(de->base + EMIF_CAPABILITY_BASE));
+ else
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK,
+ readq(de->base + EMIF_CAPABILITY_BASE));
return (val & BIT_ULL(eattr->index)) ? attr->mode : 0;
}
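
For feature revisions above 0, the register at offset 0x10 is read as the Capability Register and the *_clear attributes are hidden, since memory clear is a revision-0 facility. A condensed sketch of the revision-dependent channel-mask read, using the macros added above (emif_channel_mask is a hypothetical helper):

/* Sketch: pick the channel-bitmap width by EMIF feature revision. */
static u64 emif_channel_mask(struct dfl_emif *de, u8 revision)
{
	u64 reg = readq(de->base + EMIF_CAPABILITY_BASE);

	return revision == 0 ?
	       FIELD_GET(EMIF_CAPABILITY_CHN_MSK_V0, reg) :	/* 4 channels */
	       FIELD_GET(EMIF_CAPABILITY_CHN_MSK, reg);		/* up to 8 */
}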
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index dbdf87bc0b78..fcd20d85d385 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -134,6 +134,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
for_each_child_of_node(np_ddr, np_tim) {
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_do_get_timings(np_tim, &timings[i])) {
+ of_node_put(np_tim);
devm_kfree(dev, timings);
goto default_timings;
}
@@ -284,6 +285,7 @@ const struct lpddr3_timings
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_lpddr3_do_get_timings(np_tim, &timings[i])) {
devm_kfree(dev, timings);
+ of_node_put(np_tim);
goto default_timings;
}
i++;
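
Both of_memory.c hunks plug the same leak class: for_each_child_of_node() holds a reference on the child it yields and only drops it on the next iteration, so any early exit from the loop body must call of_node_put() itself. The pattern in isolation (parse_child is a hypothetical stand-in):

/* Sketch of the reference rule for early exits from the iterator. */
for_each_child_of_node(np_parent, np_child) {
	if (parse_child(np_child) < 0) {
		of_node_put(np_child);	/* drop the ref the iterator took */
		goto default_timings;	/* early exit bypasses the auto-put */
	}
}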
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index f84b98278745..d39ee7d06665 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -122,6 +122,7 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
}
of_platform_device_create(child, NULL, &adev->dev);
+ of_node_put(child);
return 0;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 10c563999d3d..e63608834411 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -171,6 +171,7 @@ config MMC_SDHCI_OF_ASPEED
config MMC_SDHCI_OF_ASPEED_TEST
bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
depends on MMC_SDHCI_OF_ASPEED && KUNIT
+ depends on (MMC_SDHCI_OF_ASPEED=m || KUNIT=y)
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the ASPEED SDHCI driver. Select this
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 2f08d442e557..fc462995cf94 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1172,8 +1172,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
ret = device_reset_optional(&pdev->dev);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ goto free_host;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
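
Returning straight out of meson_mmc_probe() here would leak the host allocated earlier by mmc_alloc_host(); the fix routes the error through the function's existing free_host label instead. A self-contained sketch of that unwind shape (probe_sketch is illustrative only):

/* Sketch of goto-based unwinding in a probe path. */
static int probe_sketch(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = device_reset_optional(&pdev->dev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "device reset failed\n");
		goto free_host;		/* undo the allocation, don't leak */
	}

	return 0;

free_host:
	mmc_free_host(mmc);
	return ret;
}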
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4ff73d1883de..69d78604d1fc 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2446,6 +2446,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2932,11 +2935,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
}
/*
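
Both mtk-sd hunks add the same acknowledge step before the hardware goes quiet: judging by the read-then-write-back pattern, MSDC_INT is a write-1-to-clear register, so writing the pending bits straight back clears any interrupt that raced with CQE disable or suspend. The idiom in isolation:

/* Sketch: ack all pending MSDC interrupts (write-1-to-clear register,
 * going by the pattern in the hunks above). */
u32 val = readl(host->base + MSDC_INT);

writel(val, host->base + MSDC_INT);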
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0db9490dc659..e4003f6058eb 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 4e904850973c..a7343d4bc50e 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -349,6 +349,15 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
+#ifdef CONFIG_ACPI
+static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+#endif
+
static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
.ops = &sdhci_dwcmshc_rk35xx_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
@@ -431,7 +440,10 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
- { .id = "MLNXBF30" },
+ {
+ .id = "MLNXBF30",
+ .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+ },
{}
};
#endif
@@ -447,7 +459,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
int err;
u32 extra;
- pltfm_data = of_device_get_match_data(&pdev->dev);
+ pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
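
of_device_get_match_data() returns NULL when the device was bound through ACPI rather than a device tree, which would have left the new BlueField-3 driver_data unused; device_get_match_data() resolves match data across both firmware interfaces. Usage after the change:

/* Sketch: fwnode-agnostic lookup covers both DT and ACPI tables. */
const struct sdhci_pltfm_data *pltfm_data;

pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data)
	return -ENODEV;		/* neither DT nor ACPI supplied data */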
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d7fb33c078e8..184608bd8999 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -84,7 +84,8 @@ enum ad_link_speed_type {
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
-static u16 ad_ticks_per_sec;
+
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
@@ -2001,36 +2002,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
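
Every caller passed the same constant tick rate, so the module-level variable collapses into a compile-time constant. Assuming AD_TIMER_INTERVAL is the 100 ms period the removed comment alludes to, the arithmetic works out as:

/* Sketch: with AD_TIMER_INTERVAL == 100 (ms), the state machine runs
 * 1000 / 100 = 10 ticks per second, so the aggregator-selection
 * timeout below is AD_AGGREGATOR_SELECTION_TIMER * 10 ticks. */
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;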
/**
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 50e60843020c..2f4da2c13c0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2081,7 +2081,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 4b14d80d27ed..e4f446db0ca1 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -613,6 +613,9 @@ int ksz9477_fdb_dump(struct ksz_device *dev, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index ed7d137cba99..6bd69a7e6809 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -803,9 +803,15 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->info->supports_rgmii[port])
phy_interface_set_rgmii(config->supported_interfaces);
- if (dev->info->internal_phy[port])
+ if (dev->info->internal_phy[port]) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
@@ -962,6 +968,7 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
const u16 *regs;
int ret;
@@ -1001,6 +1008,14 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ /* Start with learning disabled on standalone user ports, and enabled
+	 * on the CPU port. In the absence of finer-grained mechanisms, learning on the
+ * CPU port will avoid flooding bridge local addresses on the network
+ * in some cases.
+ */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
/* start switch */
regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
SW_START, SW_START);
@@ -1277,6 +1292,8 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ p = &dev->ports[port];
+
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
@@ -1286,9 +1303,13 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
@@ -1300,12 +1321,38 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
- p = &dev->ports[port];
p->stp_state = state;
ksz_update_port_member(dev, port);
}
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -1719,6 +1766,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
.port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz_port_vlan_filtering,
.port_vlan_add = ksz_port_vlan_add,
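
The pre/main split follows the usual switchdev shape: port_pre_bridge_flags() only validates that the requested bits are supported (here, BR_LEARNING), and port_bridge_flags() applies them; mask says which bits are being changed, val gives their new state. Roughly what the core hands the driver for "bridge link set dev swp0 learning on":

/* Sketch of the switchdev request this pair of callbacks receives. */
struct switchdev_brport_flags flags = {
	.mask = BR_LEARNING,	/* only learning is being touched */
	.val  = BR_LEARNING,	/* and it is being enabled */
};
/* pre_bridge_flags rejects unsupported mask bits; bridge_flags then
 * latches p->learning and replays the STP state so PORT_LEARN_DISABLE
 * takes effect immediately. */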
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 764ada3a0f42..0d9520dc6d2d 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -65,6 +65,7 @@ struct ksz_chip_data {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
int stp_state;
struct phy_device phydev;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..83dca9179aa0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index b4034b78c0ca..1cdce8a98d1d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -274,27 +274,98 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -547,100 +618,379 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
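
The rewritten layout is indexed by the enum ocelot_stat values, and each entry names a SYS_COUNT_* register instead of a raw word offset into counter RAM, so the common ocelot code can go through the regmap like any other register access. Presumably slots a given switch lacks stay zero-initialized and are skipped by their empty name; a sketch of that consumption pattern, under that assumption:

/* Sketch: walk the fixed-size layout, skipping unimplemented slots. */
int i;
u32 val;

for (i = 0; i < OCELOT_NUM_STATS; i++) {
	const struct ocelot_stat_layout *stat = &ocelot->stats_layout[i];

	if (!stat->name[0])
		continue;	/* hole: no such counter on this switch */
	/* after selecting a port via SYS_STAT_CFG_STAT_VIEW: */
	val = ocelot_read(ocelot, stat->reg);
}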
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -2166,7 +2516,7 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
struct felix_stream_filter_counters *counters)
{
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
SYS_STAT_CFG_STAT_VIEW_M,
@@ -2183,7 +2533,7 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
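
Swapping stats_lock from a mutex to a spinlock suggests the counters are now also taken from contexts that must not sleep (ndo_get_stats64 being the usual suspect); the critical section here only selects a counter view and snapshots a handful of registers, so a non-sleeping lock costs little:

/* Sketch of the short critical section the spinlock now covers. */
spin_lock(&ocelot->stats_lock);
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
	   SYS_STAT_CFG_STAT_VIEW_M, SYS_STAT_CFG);
/* ... read the per-index filter counters ... */
spin_unlock(&ocelot->stats_lock);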
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ea0649211356..b34f4cdfe814 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -270,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -543,101 +614,379 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..10c6fea1227f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
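
The sja1105 change fixes a classic unwind off-by-one: with the post-decrement test "while (i-- >= 0)", the body runs one final time with the index already at -1 and destroys regions[-1]; the pre-decrement form stops at index 0. A small demo under the same assumption (creating regions[3] failed, so 0..2 exist):

    #include <stdio.h>

    static void destroy_region(int idx)
    {
            printf("destroy region %d\n", idx);
    }

    int main(void)
    {
            int i = 3;  /* creating regions[3] failed; 0..2 were created */

            /* The buggy form "while (i-- >= 0)" would run one extra
             * iteration with i == -1 and touch regions[-1]; pre-decrement
             * visits exactly 2, 1, 0 and then stops.
             */
            while (--i >= 0)
                    destroy_region(i);
            return 0;
    }
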
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7071604f9984..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13844,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ba0f1ffac507..f46eefb5a029 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11178,10 +11178,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
- features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
-
- if (!(bp->flags & BNXT_FLAG_TPA))
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 075c6206325c..b1b17f911300 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2130,6 +2130,7 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
+ u8 xdp_has_frags;
struct bnxt_ptp_cfg *ptp_cfg;
u8 ptp_all_rx_tstamp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 059f96f7a96f..a36803e79e92 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -1306,6 +1306,7 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
+ devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 730febd19330..a4cba7cb2783 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f53387ed0167..c3065ec0a479 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -181,6 +181,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp)
{
struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 offset;
@@ -192,7 +193,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
- xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+ if (bp->xdp_has_frags)
+ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
}
@@ -397,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog)
+ if (prog) {
tx_xdp = bp->rx_nr_rings;
+ bp->xdp_has_frags = prog->aux->xdp_has_frags;
+ }
tc = netdev_get_num_tc(dev);
if (!tc)
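
One reading of the bnxt_xdp_buff_init() hunk: the frame size handed to xdp_init_buff() now depends on whether the attached program announced multi-buffer (frags) support. A hedged sketch of just that branch, with stand-in constants:

    #include <stdbool.h>

    #define XDP_PAGE_SIZE       4096u
    #define PAGE_MODE_BUF_SIZE  3072u  /* stand-in for BNXT_PAGE_MODE_BUF_SIZE */

    /* Mirrors the branch above: programs that declared frags support get
     * the truncated page-mode buffer plus the data offset, while legacy
     * single-buffer programs see the full page.
     */
    static unsigned int xdp_frame_size(bool xdp_has_frags, unsigned int offset)
    {
            return xdp_has_frags ? PAGE_MODE_BUF_SIZE + offset : XDP_PAGE_SIZE;
    }
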
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 26433a62d7f0..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd[3];
+ __be32 rsvd;
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 45634579adb6..a770bab4d1ed 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2886,6 +2886,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
/* The Aquantia PHYs are capable of performing rate adaptation */
#define PHY_VEND_AQUANTIA 0x03a1b400
+#define PHY_VEND_AQUANTIA2 0x31c31c00
static int dpaa_phy_init(struct net_device *net_dev)
{
@@ -2893,6 +2894,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
+ u32 phy_vendor;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -2905,9 +2907,11 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
+ phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
/* Unless the PHY is capable of rate adaptation */
if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ (phy_vendor != PHY_VEND_AQUANTIA &&
+ phy_vendor != PHY_VEND_AQUANTIA2)) {
/* remove any features not supported by the controller */
ethtool_convert_legacy_u32_to_link_mode(mask,
mac_dev->if_support);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ed7301b69169..0cebe4b63adb 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -634,6 +634,13 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
+ struct {
+ struct timespec64 ts_phc;
+ u64 ns_sys;
+ u32 at_corr;
+ u8 at_inc_corr;
+ } ptp_saved_state;
+
u64 ethtool_stats[];
};
@@ -644,5 +651,8 @@ void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+void fec_ptp_save_state(struct fec_enet_private *fep);
+int fec_ptp_restore_state(struct fec_enet_private *fep);
+
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e8e2aa1e7f01..b0d60f898249 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -285,8 +285,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -982,6 +985,9 @@ fec_restart(struct net_device *ndev)
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
+ struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+
+ fec_ptp_save_state(fep);
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1135,7 +1141,7 @@ fec_restart(struct net_device *ndev)
}
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1156,6 +1162,14 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
+ /* Restart PPS if needed */
+ if (fep->pps_enable) {
+ /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+ fep->pps_enable = 0;
+ fec_ptp_restore_state(fep);
+ fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+ }
+
/* Enable interrupts we wish to service */
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1206,6 +1220,8 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
+ struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+ u32 ecntl = 0;
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -1215,6 +1231,8 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
+ fec_ptp_save_state(fep);
+
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
@@ -1234,12 +1252,28 @@ fec_stop(struct net_device *ndev)
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ if (fep->bufdesc_ex)
+ ecntl |= FEC_ECR_EN1588;
+
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ ecntl |= FEC_ECR_ETHEREN;
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
+
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
+
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
+ /* Restart PPS if needed */
+ if (fep->pps_enable) {
+ /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+ fep->pps_enable = 0;
+ fec_ptp_restore_state(fep);
+ fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+ }
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7d49c28215f3..c74d04f4b2fd 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Adding the remaining nanoseconds
* to the current timer advances it to the next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -637,7 +633,36 @@ void fec_ptp_stop(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ if (fep->pps_enable)
+ fec_ptp_enable_pps(fep, 0);
+
cancel_delayed_work_sync(&fep->time_keep);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
+
+void fec_ptp_save_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc_corr;
+
+ fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+ fep->ptp_saved_state.ns_sys = ktime_get_ns();
+
+ fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
+ atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
+ fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
+}
+
+int fec_ptp_restore_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ u64 ns_sys;
+
+ writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
+ atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
+ writel(atime_inc, fep->hwp + FEC_ATIME_INC);
+
+ ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
+ timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
+ return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+}
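
The save/restore pair implements a simple compensation scheme: capture the PHC time together with a monotonic system timestamp before the block reset, then advance the saved PHC time by the elapsed monotonic time when restoring. A userspace sketch of the arithmetic, assuming hypothetical names and CLOCK_MONOTONIC standing in for ktime_get_ns():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000ull

    struct saved_state {
            struct timespec ts_phc; /* PHC time captured before the reset */
            uint64_t ns_sys;        /* CLOCK_MONOTONIC at the same instant */
    };

    static uint64_t mono_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
    }

    static void save_state(struct saved_state *s, struct timespec phc_now)
    {
            s->ts_phc = phc_now;
            s->ns_sys = mono_ns();
    }

    static struct timespec restore_state(const struct saved_state *s)
    {
            uint64_t elapsed = mono_ns() - s->ns_sys;
            struct timespec ts = s->ts_phc;

            ts.tv_nsec += elapsed % NSEC_PER_SEC;
            ts.tv_sec  += elapsed / NSEC_PER_SEC + ts.tv_nsec / NSEC_PER_SEC;
            ts.tv_nsec %= NSEC_PER_SEC;
            return ts;  /* would be fed to the PHC's settime() */
    }

    int main(void)
    {
            struct saved_state s;
            struct timespec phc = { .tv_sec = 1000, .tv_nsec = 900000000 };

            save_state(&s, phc);
            /* ... MAC reset happens here ... */
            phc = restore_state(&s);
            printf("restored PHC: %lld.%09ld\n", (long long)phc.tv_sec, phc.tv_nsec);
            return 0;
    }
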
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 156e92c43780..e9cd0fa6a0d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -4485,7 +4485,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
(struct in6_addr *)&ipv6_full_mask))
new_mask |= I40E_L3_V6_DST_MASK;
else if (ipv6_addr_any((struct in6_addr *)
- &usr_ip6_spec->ip6src))
+ &usr_ip6_spec->ip6dst))
new_mask &= ~I40E_L3_V6_DST_MASK;
else
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b36bf9c3e1e4..9f1d5de7bf16 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -384,7 +384,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f6ba97a0166e..d4226161a3ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3203,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* set the tx_flags to indicate the IP protocol type. this is
* required so that checksum header computation below is accurate.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
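
Both admin-queue hunks follow the kernel's goto-unwind convention: each failure point jumps to the label that releases everything acquired so far, and labels fall through in reverse order of acquisition. A self-contained sketch of the shape these hunks add (hypothetical names, with a deliberately failing register-config step):

    #include <stdlib.h>

    #define NUM_ENTRIES 4

    static int config_registers(void)
    {
            return -1;  /* pretend register setup failed, as in the hunks */
    }

    int init_queue(void)
    {
            void **bufs;
            void *ring;
            int err, i;

            ring = malloc(256);
            if (!ring)
                    return -1;

            bufs = calloc(NUM_ENTRIES, sizeof(*bufs));
            if (!bufs) {
                    err = -1;
                    goto free_ring;
            }

            for (i = 0; i < NUM_ENTRIES; i++) {
                    bufs[i] = malloc(64);
                    if (!bufs[i]) {
                            err = -1;
                            goto free_bufs;
                    }
            }

            err = config_registers();
            if (err)
                    goto free_bufs; /* the path the fix adds: free buffers too */

            return 0;   /* success: in the driver, the HW now owns these */

    free_bufs:
            while (--i >= 0)    /* free only what was actually allocated */
                    free(bufs[i]);
            free(bufs);
    free_ring:
            free(ring);
            return err;
    }

    int main(void)
    {
            return init_queue() ? 1 : 0;
    }
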
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 45d097a164ad..f39440ad5c50 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2367,7 +2367,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
- goto err_alloc;
+ goto err;
} else if (err == -EINVAL) {
/* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -3086,12 +3086,15 @@ continue_reset:
return;
reset_err:
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
+ }
+ iavf_disable_vf(adapter);
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
- iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
}
/**
@@ -4085,8 +4088,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in __IAVF_INIT_CONFIG_ADAPTER state, the crit_lock
+ * is already taken and iavf_open is called from an upper
+ * device's notifier reacting to the NETDEV_REGISTER event.
+ * We have to bail out here to avoid a deadlock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cc5b85afd437..841fa149c407 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
}
/**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets XSK buff pool pointer on XDP ring.
+ *
+ * The XDP ring is taken from the Rx ring, and the Rx ring is picked based on
+ * the provided queue id. The reason is that a queue vector might have more
+ * than one XDP ring assigned, e.g. when the user reduced the queue count on
+ * the netdev; the Rx ring carries a pointer to one of these XDP rings for its
+ * own purposes, such as handling the XDP_TX action, so we can piggyback here
+ * on the rx_ring->xdp_ring assignment done during XDP ring initialization.
*/
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
- struct ice_vsi *vsi = ring->vsi;
- u16 qid;
+ struct ice_tx_ring *ring;
- qid = ring->q_index - vsi->alloc_txq;
+ ring = vsi->rx_rings[qid]->xdp_ring;
+ if (!ring)
+ return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+ ring->xsk_pool = NULL;
+ return;
+ }
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
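
A sketch of the new helper's control flow under assumed, simplified types: the XSK pool is installed on whichever XDP ring the Rx ring already points at, and explicitly cleared when zero-copy is not enabled for the queue.

    #include <stdbool.h>
    #include <stddef.h>

    struct tx_ring { void *xsk_pool; };
    struct rx_ring { struct tx_ring *xdp_ring; };

    static void tx_xsk_pool_set(struct rx_ring *rx, bool zc_enabled, void *pool)
    {
            struct tx_ring *ring = rx->xdp_ring;

            if (!ring)
                    return; /* queue has no XDP ring attached */

            /* clear rather than leave a stale pool pointer behind */
            ring->xsk_pool = zc_enabled ? pool : NULL;
    }
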
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 85a94483c2ed..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a830f7f9aed0..0c4ec9264071 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1986,8 +1986,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- ice_for_each_xdp_txq(vsi, i)
- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
return ret;
}
@@ -3181,7 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
ice_vsi_init_vlan_ops(vsi);
@@ -4062,7 +4062,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
if (err && err != -EEXIST)
return err;
- return 0;
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so it isn't left enabled by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index eb40526ee179..173fe6c31341 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
promisc_m, 0);
}
+ if (status && status != -EEXIST)
+ return status;
- return status;
+ return 0;
}
/**
@@ -2579,7 +2581,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2587,13 +2588,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
}
}
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key))
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- else
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
- }
-
return 0;
free_xdp_rings:
@@ -2683,6 +2677,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
xdp_rings_rem -= xdp_rings_per_v;
}
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key)) {
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ } else {
+ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx) {
+ if (ice_ring_is_xdp(ring)) {
+ vsi->rx_rings[i]->xdp_ring = ring;
+ break;
+ }
+ }
+ }
+ ice_tx_xsk_pool(vsi, i);
+ }
+
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
@@ -3573,6 +3584,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Make sure VLAN delete is successful before updating VLAN
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 262e553e3b58..3808034f7e7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4445,6 +4445,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4452,7 +4459,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8fd7c3e37f5e..0abeed092de1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
- if (WARN_ON(!vsi))
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
return -EINVAL;
+ }
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -762,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops;
- int err;
+ int err = 0;
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_tx_filtering(vsi);
- if (err)
- return err;
+ /* Allow VF with VLAN 0 only to send all tagged traffic */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
return ice_cfg_mac_antispoof(vsi, true);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 094e3c97a1ea..2b4c791b6cba 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2288,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN filtering on first non-zero VLAN */
if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2333,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
/* Disable VLAN filtering when only VLAN 0 is left */
- if (!ice_vsi_has_non_zero_vlans(vsi))
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2838,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2853,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
- ice_vf_dis_vlan_promisc(vsi, &vlan);
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
}
}
@@ -2931,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (err)
return err;
}
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2946,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
- if (err)
- return err;
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 49ba8bfdbf04..e48e29258450 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -329,6 +329,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use a queue id within the combined queues count\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
!is_power_of_2(vsi->tx_rings[qid]->count)) {
netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
@@ -353,7 +359,7 @@ xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
if (!ret && pool_present)
- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
@@ -944,13 +950,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (!ice_is_xdp_ena_vsi(vsi))
return -EINVAL;
- if (queue_id >= vsi->num_txq)
+ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
return -EINVAL;
- if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -EINVAL;
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
- ring = vsi->xdp_rings[queue_id];
+ if (!ring->xsk_pool)
+ return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
* it will run again. If not, trigger an interrupt and
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d8b836a85cc3..2796e81d2726 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to protect VF resources from concurrent access */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -7958,8 +7965,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7973,6 +7982,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
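
The igb hunks serialize VF teardown against the mailbox message task with a spinlock. A userspace analogue with a pthread mutex standing in for vfs_lock (hypothetical layout):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t vfs_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *vf_data;
    static int vfs_allocated_count;

    /* Mailbox work: walks the VF table under the lock. */
    void msg_task(void)
    {
            pthread_mutex_lock(&vfs_lock);
            for (int vf = 0; vf < vfs_allocated_count; vf++)
                    ;   /* process reset/msg/ack events for vf_data[vf] */
            pthread_mutex_unlock(&vfs_lock);
    }

    /* Teardown: frees the table under the same lock, so msg_task() can
     * never observe a freed vf_data alongside a stale count.
     */
    void disable_sriov(void)
    {
            pthread_mutex_lock(&vfs_lock);
            free(vf_data);
            vf_data = NULL;
            vfs_allocated_count = 0;
            pthread_mutex_unlock(&vfs_lock);
    }
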
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9f06896a049b..f8605f57bd06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1214,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1249,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1293,6 +1280,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1318,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
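
The ixgbe refactor moves SYSTIME arming out of ixgbe_ptp_start_cyclecounter() so the reset path performs it every time. A toy program showing the resulting call order, with stubbed steps standing in for the register writes:

    #include <stdio.h>

    struct adapter { int hw; };

    static void ptp_start_cyclecounter(struct adapter *a) { (void)a; puts("cyclecounter params set"); }
    static void ptp_init_systime(struct adapter *a)       { (void)a; puts("SYSTIME zeroed and armed"); }
    static void timecounter_seed(struct adapter *a)       { (void)a; puts("timecounter seeded"); }

    /* After the change, arming SYSTIME is a separate step that runs on
     * every reset, not only inside the cyclecounter setup.
     */
    void ptp_reset(struct adapter *a)
    {
            ptp_start_cyclecounter(a);
            ptp_init_systime(a);
            timecounter_seed(a);
    }

    int main(void)
    {
            struct adapter a = { 0 };

            ptp_reset(&a);
            return 0;
    }
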
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 5edb68a8aab1..57f27cc7724e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
if (!ch->rx_buff[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
@@ -239,6 +240,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
}
skb = build_skb(buf, priv->rx_skb_size);
+ if (!skb) {
+ skb_free_frag(buf);
+ net_dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
@@ -288,7 +295,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
continue;
if (ret != XRX200_DMA_PACKET_COMPLETE)
- return ret;
+ break;
rx++;
} else {
break;
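
Both xrx200 fixes share one rule: when a packet wrapper cannot be allocated, the backing buffer must be handed back (or freed) and the drop counted, rather than leaving a dangling pointer in the ring or dereferencing NULL. A userspace analogue, with wrap_buf() standing in for build_skb():

    #include <stdlib.h>

    struct pkt { char *data; size_t len; };

    struct pkt *wrap_buf(char *buf, size_t len, unsigned long *rx_dropped)
    {
            struct pkt *p = malloc(sizeof(*p));

            if (!p) {           /* build_skb() analogue failed */
                    free(buf);  /* skb_free_frag() analogue */
                    (*rx_dropped)++;
                    return NULL;
            }
            p->data = buf;
            p->len = len;
            return p;
    }
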
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d9426b01f462..5ace4609de47 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1732,7 +1732,7 @@ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
case XDP_TX: {
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
- if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
act = XDP_DROP;
break;
@@ -1891,10 +1891,19 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
- else
+ } else {
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd4;
+ }
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1902,16 +1911,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- }
-
reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb,
- trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+ mtk_ppe_check_skb(eth->ppe, skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 7405c97cda66..ecf85e9ed824 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -314,6 +314,11 @@
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_SPECIAL_TAG BIT(22)
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
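
The new MTK_RXD5_* definitions follow the usual GENMASK() field convention, where an all-ones field value means "no FOE entry". A userspace rendition of the extraction, with a local GENMASK macro since linux/bits.h is not available there:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define RXD5_FOE_ENTRY GENMASK(14, 0)

    int main(void)
    {
            uint32_t rxd5 = 0x12345678;
            uint32_t hash = rxd5 & RXD5_FOE_ENTRY;

            /* all-ones means "no entry", mirroring the != MTK_RXD5_FOE_ENTRY check */
            if (hash != RXD5_FOE_ENTRY)
                    printf("foe entry %u\n", hash);
            return 0;
    }
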
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 37522352e4b2..c8e5ca65bb6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -79,6 +79,10 @@ tc_act_police_offload(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
int err = 0;
+ err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ if (err)
+ return err;
+
err = fill_meter_params_from_act(act, &params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 0aef69527226..3a1f76eac542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -246,7 +246,7 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
- struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_ktls_offload_context_tx *obj, *n;
struct mlx5e_async_ctx *bulk_async;
int i;
@@ -255,7 +255,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
return;
i = 0;
- list_for_each_entry(obj, list, list_node) {
+ list_for_each_entry_safe(obj, n, list, list_node) {
mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
i++;
}
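
The ktls fix switches to list_for_each_entry_safe() because the loop body may free or unlink the current entry. The underlying rule, shown on a plain singly linked list: cache the successor before the body invalidates the node.

    #include <stdlib.h>

    struct node { struct node *next; };

    /* Plain iteration would read cur->next after free(cur); caching the
     * successor first is exactly what list_for_each_entry_safe() does.
     */
    void cleanup_all(struct node *head)
    {
            struct node *cur = head, *next;

            while (cur) {
                    next = cur->next;   /* grab successor before freeing */
                    free(cur);
                    cur = next;
            }
    }
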
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index e2a9b9be5c1f..e0ce5a233d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1395,10 +1395,11 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
}
return fs;
-err_free_fs:
- kvfree(fs);
+
err_free_vlan:
mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
err:
return NULL;
}
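
The en_fs fix is purely about label order: error labels fall through, so they must release the member before the containing object, or the fall-through path frees the container and then dereferences it. A minimal sketch of the corrected ordering (hypothetical names):

    #include <stdlib.h>

    struct fs { int *vlan; };

    struct fs *fs_init(int fail_late)
    {
            struct fs *fs = calloc(1, sizeof(*fs));

            if (!fs)
                    goto err;
            fs->vlan = malloc(sizeof(*fs->vlan));
            if (!fs->vlan)
                    goto err_free_fs;
            if (fail_late)          /* a later setup step failed */
                    goto err_free_vlan;
            return fs;

    err_free_vlan:
            free(fs->vlan);         /* member first... */
    err_free_fs:
            free(fs);               /* ...containing object last */
    err:
            return NULL;
    }
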
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d858667736a3..02eb2f0fa2ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3682,7 +3682,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
+ MLX5_TC_FLAG(NIC_OFFLOAD);
+ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -4769,14 +4771,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
- /* HW LRO */
- if (MLX5_CAP_ETH(mdev, lro_cap) &&
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- /* No XSK params: checking the availability of striding RQ in general. */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->packet_merge.type = slow_pci_heuristic(mdev) ?
- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
- }
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 4c1599de652c..759f7d3c2cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -662,6 +662,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -696,6 +698,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -708,12 +717,21 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
- return mlx5e_init_rep(mdev, netdev);
+ mlx5e_build_rep_params(netdev);
+ mlx5e_timestamp_init(priv);
+ return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
@@ -836,13 +854,6 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
- if (!priv->fs) {
- netdev_err(priv->netdev, "FS allocation failed\n");
- return -ENOMEM;
- }
-
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ed73132129aa..a9f4c652f859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -427,7 +427,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (mlx5_lag_mpesw_is_activated(esw->dev))
+ if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
+ mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
@@ -3115,8 +3116,10 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
+ if (err) {
+ devl_unlock(devlink);
return;
+ }
}
esw->esw_funcs.num_vfs = new_num_vfs;
devl_unlock(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 0f34e3c80d1f..065102278cb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1067,30 +1067,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
+ unsigned long flags;
if (fn >= ldev->ports)
return;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
for (i = 0; i < ldev->ports; i++) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
}
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -1234,7 +1236,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
+ if (!ldev->pf[i].netdev)
break;
if (i >= ldev->ports)
@@ -1246,12 +1248,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1260,12 +1263,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1274,13 +1278,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev) &&
dev == ldev->pf[MLX5_LAG_P1].dev;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1289,12 +1294,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1303,13 +1309,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev) &&
test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1352,9 +1359,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -1373,7 +1381,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return ndev;
}
@@ -1383,10 +1391,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
u8 port = 0;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -1401,7 +1410,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
port = ldev->v2p_map[port * ldev->buckets];
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1422,8 +1431,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -1433,7 +1443,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
ldev->pf[MLX5_LAG_P1].dev;
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1446,6 +1456,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
struct mlx5_lag *ldev;
+ unsigned long flags;
int num_ports;
int ret, i, j;
void *out;
@@ -1462,7 +1473,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
@@ -1472,7 +1483,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bec8d6d0b5f6..c085b031abfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1530,7 +1530,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
+ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1597,6 +1599,7 @@ err_timeout_init:
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
return err;
}
@@ -1618,6 +1621,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
}
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index ec76a8b1acc1..60596357bfc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -376,8 +376,8 @@ retry:
goto out_dropped;
}
}
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_dropped;
@@ -524,10 +524,13 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
dev->priv.reclaim_pages_discard += npages;
}
/* if triggered by FW event and failed by FW then ignore */
- if (event && err == -EREMOTEIO)
+ if (event && err == -EREMOTEIO) {
err = 0;
+ goto out_free;
+ }
+
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index ee2e1b7c1310..c0e6c487c63c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -159,11 +159,11 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
- devl_unlock(devlink);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
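
The sriov fix hoists devl_unlock() above the error check so the lock is dropped on the failure path too. The general shape, as a userspace sketch with a pthread mutex standing in for the devlink lock:

    #include <pthread.h>

    static pthread_mutex_t devlink_lock = PTHREAD_MUTEX_INITIALIZER;

    static int device_enable(void)
    {
            return -1;  /* pretend enabling failed */
    }

    int do_enable(void)
    {
            int err;

            pthread_mutex_lock(&devlink_lock);
            err = device_enable();
            pthread_mutex_unlock(&devlink_lock);    /* released on both paths */
            if (err)
                    return err;
            /* ... continue without holding the lock ... */
            return 0;
    }
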
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1e240cdd9cbd..30c7b0e15721 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1897,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
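[Editor's sketch] The reorder in mlxsw_sp_port_remove() matters because unregister_netdev() ends up calling ndo_stop (as the inline comment notes), and the stop path can still touch the port's PTP state; clearing that state is only safe once the netdev is gone. Teardown should run in reverse dependency order, sketched with hypothetical helpers:

unregister_netdev(port->dev);   /* runs ndo_stop, which may use PTP state */
port_ptp_clear(port);           /* safe only once ndo_stop cannot run */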
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 2e0b704b8a31..7b01b9c20722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,6 +46,7 @@ struct mlxsw_sp2_ptp_state {
* enabled.
*/
struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
};
struct mlxsw_sp1_ptp_key {
@@ -1374,6 +1375,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
goto err_ptp_traps_set;
refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
return &ptp_state->common;
err_ptp_traps_set:
@@ -1388,6 +1390,7 @@ void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ mutex_destroy(&ptp_state->lock);
mlxsw_sp_ptp_traps_unset(mlxsw_sp);
kfree(ptp_state);
}
@@ -1461,7 +1464,10 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
*config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
return 0;
}
@@ -1523,6 +1529,9 @@ mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
return -EINVAL;
}
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
*p_ing_types = ing_types;
*p_egr_types = egr_types;
return 0;
@@ -1574,8 +1583,6 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
@@ -1597,8 +1604,6 @@ static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
@@ -1618,16 +1623,20 @@ err_ptp_disable:
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config)
{
+ struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
&new_egr_types, &rx_filter);
if (err)
- return err;
+ goto err_get_message_types;
new_config.flags = config->flags;
new_config.tx_type = config->tx_type;
@@ -1640,11 +1649,11 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
new_egr_types, new_config);
if (err)
- return err;
+ goto err_configure_port;
} else if (!new_ing_types && !new_egr_types && ptp_enabled) {
err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
if (err)
- return err;
+ goto err_deconfigure_port;
}
mlxsw_sp_port->ptp.ing_types = new_ing_types;
@@ -1652,8 +1661,15 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
/* Notify the ioctl caller what we are actually timestamping. */
config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
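[Editor's sketch] With the ASSERT_RTNL() calls dropped, the new ptp_state->lock is the only thing serializing readers and writers of the cached hwtstamp config, so the set path holds it across the whole validate/configure/commit sequence and unwinds through a common unlock label. A condensed sketch of that locking shape; the types and helpers here are illustrative, not the mlxsw API:

static int hwtstamp_set(struct ptp_state *st, struct hwtstamp_config *cfg)
{
        int err;

        mutex_lock(&st->lock);

        err = validate_config(cfg);
        if (err)
                goto out_unlock;

        err = configure_hw(st, cfg);
        if (err)
                goto out_unlock;

        st->config = *cfg;      /* cache only after the HW write succeeded */

out_unlock:
        mutex_unlock(&st->lock);
        return err;
}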
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 2d1628fdefc1..a8b88230959a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -171,10 +171,11 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
@@ -231,10 +232,11 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
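[Editor's sketch] The spectrum_ptp.h change is a build fix: without static inline, each stub above is an ordinary external definition, so every object file that includes the header with PTP support compiled out emits its own copy and the final link fails with multiple-definition errors. The usual convention, sketched with a hypothetical feature option:

struct thing;

#if IS_ENABLED(CONFIG_MY_FEATURE)
int my_feature_do(struct thing *t);             /* defined in a .c file */
#else
static inline int my_feature_do(struct thing *t)
{
        return -EOPNOTSUPP;                     /* feature compiled out */
}
#endif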
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1d6e3b641b2e..d928b75f3780 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -710,7 +710,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
@@ -718,10 +718,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (lan966x->fdma)
devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
- if (lan966x->ptp_irq)
+ if (lan966x->ptp_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
- if (lan966x->ptp_ext_irq)
+ if (lan966x->ptp_ext_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
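[Editor's sketch] platform_get_irq_byname() returns a negative errno when the interrupt is missing, and this driver additionally overwrites torn-down IRQ numbers with -ENXIO, so the old truthiness tests treated an error code as a usable IRQ and called disable_irq()/devm_free_irq() on it. The probe hunk below applies the same strictly-positive test before requesting the handler:

int irq = platform_get_irq_byname(pdev, "ana");

if (irq > 0) {
        err = devm_request_irq(&pdev->dev, irq, handler, 0, "ana", priv);
        if (err)
                return err;
}
/* irq <= 0: the line is absent (or already torn down); skip it */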
@@ -1049,7 +1049,7 @@ static int lan966x_probe(struct platform_device *pdev)
}
lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
lan966x_ana_irq_handler, IRQF_ONESHOT,
"ana irq", lan966x);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a3214a762e4b..9e57d23e57bf 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
struct sockaddr *address = addr;
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
@@ -74,11 +71,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +139,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +164,6 @@ static int moxart_mac_open(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&priv->napi);
moxart_mac_reset(ndev);
@@ -193,6 +182,7 @@ static int moxart_mac_open(struct net_device *ndev)
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +194,11 @@ static int moxart_mac_stop(struct net_device *ndev)
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +235,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +289,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +353,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +374,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +483,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->base_addr = res->start;
+ ret = platform_get_ethdev_address(p_dev, ndev);
+ if (ret == -EPROBE_DEFER)
+ goto init_fail;
+ if (ret)
+ eth_hw_addr_random(ndev);
+ moxart_update_mac_address(ndev);
+
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -501,7 +503,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
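[Editor's sketch] Every mapping call in moxart_ether.c now goes through &priv->pdev->dev: the struct device embedded in a net_device carries no DMA configuration (no dma_mask, no dma_ops), so mapping against it is at best a no-op and trips CONFIG_DMA_API_DEBUG; the platform device is the one actually attached to the bus. In brief:

dma_addr_t map = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);

if (dma_mapping_error(&pdev->dev, map))
        return -ENOMEM;

/* ... hardware DMA into buf ... */

dma_unmap_single(&pdev->dev, map, len, DMA_FROM_DEVICE); /* same device */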
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d4649e4ee0e7..306026e6aa11 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1860,16 +1860,20 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (i = 0; i < ocelot->num_stats; i++)
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
+ }
}
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * ocelot->num_stats;
+ unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int err, j;
@@ -1877,9 +1881,8 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
list_for_each_entry(region, &ocelot->stats_regions, node) {
- err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- region->offset, region->buf,
- region->count);
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
if (err)
return err;
@@ -1906,13 +1909,13 @@ static void ocelot_check_stats_work(struct work_struct *work)
stats_work);
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
err = ocelot_port_update_stats(ocelot, i);
if (err)
break;
}
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1925,16 +1928,22 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
/* check and update now */
err = ocelot_port_update_stats(ocelot, port);
- /* Copy all counters */
- for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port * ocelot->num_stats + i];
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1943,10 +1952,16 @@ EXPORT_SYMBOL(ocelot_get_ethtool_stats);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
+ int i, num_stats = 0;
+
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return ocelot->num_stats;
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
}
EXPORT_SYMBOL(ocelot_get_sset_count);
@@ -1958,8 +1973,11 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->stats_regions);
- for (i = 0; i < ocelot->num_stats; i++) {
- if (region && ocelot->stats_layout[i].offset == last + 1) {
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -1967,12 +1985,12 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!region)
return -ENOMEM;
- region->offset = ocelot->stats_layout[i].offset;
+ region->base = ocelot->stats_layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot->stats_layout[i].offset;
+ last = ocelot->stats_layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
@@ -3340,7 +3358,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- const struct ocelot_stat_layout *stat;
char queue_name[32];
int i, ret;
u32 port;
@@ -3353,17 +3370,13 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->num_stats = 0;
- for_each_stat(ocelot, stat)
- ocelot->num_stats++;
-
ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * ocelot->num_stats,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
sizeof(u64), GFP_KERNEL);
if (!ocelot->stats)
return -ENOMEM;
- mutex_init(&ocelot->stats_lock);
+ spin_lock_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
@@ -3511,7 +3524,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
destroy_workqueue(ocelot->owq);
- mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);
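[Editor's sketch] Two ideas run through the ocelot.c conversion: the layout becomes a fixed, enum-indexed table of OCELOT_NUM_STATS entries where unsupported counters are simply holes (designated initializers leave name[0] == '\0'), and stats_lock becomes a spinlock so the same lock can also be taken from the atomic ndo_get_stats64 path in ocelot_net.c below. The hole-skipping iteration reduces to:

/* Walk a sparse, enum-indexed layout; zeroed entries are counters
 * this switch lacks. consume_stat() is a hypothetical consumer.
 */
for (i = 0; i < OCELOT_NUM_STATS; i++) {
        if (layout[i].name[0] == '\0')
                continue;               /* hole: not supported here */
        consume_stat(layout[i].reg, layout[i].name);
}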
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 5e6136e80282..330d30841cdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -725,37 +725,42 @@ static void ocelot_get_stats64(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
+ u64 *s;
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
- SYS_STAT_CFG);
+ spin_lock(&ocelot->stats_lock);
+
+ s = &ocelot->stats[port * OCELOT_NUM_STATS];
/* Get Rx stats */
- stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
- stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
- ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
- ocelot_read(ocelot, SYS_COUNT_RX_64) +
- ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
- ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
- ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
- stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
stats->rx_dropped = dev->stats.rx_dropped;
/* Get Tx stats */
- stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
- stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
- ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
- ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
- ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
- stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
- ocelot_read(ocelot, SYS_COUNT_TX_AGING);
- stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
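[Editor's sketch] ndo_get_stats64 can be called in atomic context, so it must not sleep on register I/O; after this change it only snapshots the counters that the periodic stats worker maintains in ocelot->stats, under the spinlock introduced above. Reduced to its skeleton (field sums elided):

static void get_stats64_cached(struct ocelot *ocelot, int port,
                               struct rtnl_link_stats64 *stats)
{
        u64 *s;

        spin_lock(&ocelot->stats_lock);
        s = &ocelot->stats[port * OCELOT_NUM_STATS];
        stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];     /* no MMIO here */
        stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
        /* ... remaining fields are sums of the cached counters ... */
        spin_unlock(&ocelot->stats_lock);
}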
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 961f803aca19..9c488953f541 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -96,101 +96,379 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
- { .name = "rx_octets", .offset = 0x00, },
- { .name = "rx_unicast", .offset = 0x01, },
- { .name = "rx_multicast", .offset = 0x02, },
- { .name = "rx_broadcast", .offset = 0x03, },
- { .name = "rx_shorts", .offset = 0x04, },
- { .name = "rx_fragments", .offset = 0x05, },
- { .name = "rx_jabbers", .offset = 0x06, },
- { .name = "rx_crc_align_errs", .offset = 0x07, },
- { .name = "rx_sym_errs", .offset = 0x08, },
- { .name = "rx_frames_below_65_octets", .offset = 0x09, },
- { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
- { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
- { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
- { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
- { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
- { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
- { .name = "rx_pause", .offset = 0x10, },
- { .name = "rx_control", .offset = 0x11, },
- { .name = "rx_longs", .offset = 0x12, },
- { .name = "rx_classified_drops", .offset = 0x13, },
- { .name = "rx_red_prio_0", .offset = 0x14, },
- { .name = "rx_red_prio_1", .offset = 0x15, },
- { .name = "rx_red_prio_2", .offset = 0x16, },
- { .name = "rx_red_prio_3", .offset = 0x17, },
- { .name = "rx_red_prio_4", .offset = 0x18, },
- { .name = "rx_red_prio_5", .offset = 0x19, },
- { .name = "rx_red_prio_6", .offset = 0x1A, },
- { .name = "rx_red_prio_7", .offset = 0x1B, },
- { .name = "rx_yellow_prio_0", .offset = 0x1C, },
- { .name = "rx_yellow_prio_1", .offset = 0x1D, },
- { .name = "rx_yellow_prio_2", .offset = 0x1E, },
- { .name = "rx_yellow_prio_3", .offset = 0x1F, },
- { .name = "rx_yellow_prio_4", .offset = 0x20, },
- { .name = "rx_yellow_prio_5", .offset = 0x21, },
- { .name = "rx_yellow_prio_6", .offset = 0x22, },
- { .name = "rx_yellow_prio_7", .offset = 0x23, },
- { .name = "rx_green_prio_0", .offset = 0x24, },
- { .name = "rx_green_prio_1", .offset = 0x25, },
- { .name = "rx_green_prio_2", .offset = 0x26, },
- { .name = "rx_green_prio_3", .offset = 0x27, },
- { .name = "rx_green_prio_4", .offset = 0x28, },
- { .name = "rx_green_prio_5", .offset = 0x29, },
- { .name = "rx_green_prio_6", .offset = 0x2A, },
- { .name = "rx_green_prio_7", .offset = 0x2B, },
- { .name = "tx_octets", .offset = 0x40, },
- { .name = "tx_unicast", .offset = 0x41, },
- { .name = "tx_multicast", .offset = 0x42, },
- { .name = "tx_broadcast", .offset = 0x43, },
- { .name = "tx_collision", .offset = 0x44, },
- { .name = "tx_drops", .offset = 0x45, },
- { .name = "tx_pause", .offset = 0x46, },
- { .name = "tx_frames_below_65_octets", .offset = 0x47, },
- { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
- { .name = "tx_frames_128_255_octets", .offset = 0x49, },
- { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
- { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
- { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
- { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
- { .name = "tx_yellow_prio_0", .offset = 0x4E, },
- { .name = "tx_yellow_prio_1", .offset = 0x4F, },
- { .name = "tx_yellow_prio_2", .offset = 0x50, },
- { .name = "tx_yellow_prio_3", .offset = 0x51, },
- { .name = "tx_yellow_prio_4", .offset = 0x52, },
- { .name = "tx_yellow_prio_5", .offset = 0x53, },
- { .name = "tx_yellow_prio_6", .offset = 0x54, },
- { .name = "tx_yellow_prio_7", .offset = 0x55, },
- { .name = "tx_green_prio_0", .offset = 0x56, },
- { .name = "tx_green_prio_1", .offset = 0x57, },
- { .name = "tx_green_prio_2", .offset = 0x58, },
- { .name = "tx_green_prio_3", .offset = 0x59, },
- { .name = "tx_green_prio_4", .offset = 0x5A, },
- { .name = "tx_green_prio_5", .offset = 0x5B, },
- { .name = "tx_green_prio_6", .offset = 0x5C, },
- { .name = "tx_green_prio_7", .offset = 0x5D, },
- { .name = "tx_aged", .offset = 0x5E, },
- { .name = "drop_local", .offset = 0x80, },
- { .name = "drop_tail", .offset = 0x81, },
- { .name = "drop_yellow_prio_0", .offset = 0x82, },
- { .name = "drop_yellow_prio_1", .offset = 0x83, },
- { .name = "drop_yellow_prio_2", .offset = 0x84, },
- { .name = "drop_yellow_prio_3", .offset = 0x85, },
- { .name = "drop_yellow_prio_4", .offset = 0x86, },
- { .name = "drop_yellow_prio_5", .offset = 0x87, },
- { .name = "drop_yellow_prio_6", .offset = 0x88, },
- { .name = "drop_yellow_prio_7", .offset = 0x89, },
- { .name = "drop_green_prio_0", .offset = 0x8A, },
- { .name = "drop_green_prio_1", .offset = 0x8B, },
- { .name = "drop_green_prio_2", .offset = 0x8C, },
- { .name = "drop_green_prio_3", .offset = 0x8D, },
- { .name = "drop_green_prio_4", .offset = 0x8E, },
- { .name = "drop_green_prio_5", .offset = 0x8F, },
- { .name = "drop_green_prio_6", .offset = 0x90, },
- { .name = "drop_green_prio_7", .offset = 0x91, },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static void ocelot_pll5_init(struct ocelot *ocelot)
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index c2af4eb8ca5d..9cf82ecf191c 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+	REG(SYS_COUNT_DROP_YELLOW_PRIO_7,	0x000224),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_0,	0x000228),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_1,	0x00022c),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_2,	0x000230),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_3,	0x000234),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_4,	0x000238),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_5,	0x00023c),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_6,	0x000240),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_7,	0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 1443f788ee37..0be79c516781 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1564,8 +1564,67 @@ static int ionic_set_features(struct net_device *netdev,
return err;
}
+static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+
+ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_getattr = {
+ .opcode = IONIC_CMD_LIF_GETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
+ return 0;
+}
+
+static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
+{
+ u8 get_mac[ETH_ALEN];
+ int err;
+
+ err = ionic_set_attr_mac(lif, mac);
+ if (err)
+ return err;
+
+ err = ionic_get_attr_mac(lif, get_mac);
+ if (err)
+ return err;
+
+	/* Deal with older firmware that silently ignores the set attr mac
+	 * request: it doesn't actually change the mac and doesn't return an
+	 * error, so do a get attr afterwards to verify whether the set happened
+ */
+ if (!ether_addr_equal(get_mac, mac))
+ return 1;
+
+ return 0;
+}
+
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
+ struct ionic_lif *lif = netdev_priv(netdev);
struct sockaddr *addr = sa;
u8 *mac;
int err;
@@ -1574,6 +1633,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
if (ether_addr_equal(netdev->dev_addr, mac))
return 0;
+ err = ionic_program_mac(lif, mac);
+ if (err < 0)
+ return err;
+
+ if (err > 0)
+		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal - old FW running\n",
+ __func__);
+
err = eth_prepare_mac_addr_change(netdev, addr);
if (err)
return err;
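[Editor's sketch] ionic_program_mac() above encodes a firmware quirk workaround: some older firmware acknowledges the MAC set-attribute command without applying it, so the driver reads the attribute back and uses a positive return value (distinct from a negative errno) to mean "accepted but not applied". The caller-side convention then looks like:

/* <0 is a hard error; 0 means applied; 1 means the FW ignored the set. */
err = ionic_program_mac(lif, mac);
if (err < 0)
        return err;
if (err > 0)
        netdev_dbg(netdev, "old FW ignored the MAC set, continuing\n");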
@@ -2963,6 +3030,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
+ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+ dev_info(ionic->dev, "FW Up: clearing broken state\n");
+
err = ionic_qcqs_alloc(lif);
if (err)
goto err_unlock;
@@ -3169,6 +3239,7 @@ static int ionic_station_set(struct ionic_lif *lif)
.attr = IONIC_LIF_ATTR_MAC,
},
};
+ u8 mac_address[ETH_ALEN];
struct sockaddr addr;
int err;
@@ -3177,8 +3248,23 @@ static int ionic_station_set(struct ionic_lif *lif)
return err;
netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
ctx.comp.lif_getattr.mac);
- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
- return 0;
+ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
+
+ if (is_zero_ether_addr(mac_address)) {
+ eth_hw_addr_random(netdev);
+ netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
+ ether_addr_copy(mac_address, netdev->dev_addr);
+
+ err = ionic_program_mac(lif, mac_address);
+ if (err < 0)
+ return err;
+
+ if (err > 0) {
+			netdev_dbg(netdev, "%s: SET/GET ATTR Mac are not equal - old FW running\n",
+ __func__);
+ return 0;
+ }
+ }
if (!is_zero_ether_addr(netdev->dev_addr)) {
/* If the netdev mac is non-zero and doesn't match the default
@@ -3186,12 +3272,11 @@ static int ionic_station_set(struct ionic_lif *lif)
* likely here again after a fw-upgrade reset. We need to be
* sure the netdev mac is in our filter list.
*/
- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
- netdev->dev_addr))
+ if (!ether_addr_equal(mac_address, netdev->dev_addr))
ionic_lif_addr_add(lif, netdev->dev_addr);
} else {
/* Update the netdev mac with the device's mac */
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ ether_addr_copy(addr.sa_data, mac_address);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
if (err) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 4029b4e021f8..56f93b030551 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -474,8 +474,8 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
- msleep(1000);
iowrite32(0, &idev->dev_cmd_regs->done);
+ msleep(1000);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
goto try_again;
}
@@ -488,6 +488,8 @@ try_again:
return ionic_error_to_errno(err);
}
+ ionic_dev_cmd_clean(ionic);
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 52f9ed8db9c9..4f2b82a884b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -1134,6 +1134,7 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
pcim_iounmap_regions(pdev, BIT(0));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index caa4bfc4c1d6..9b6138b11776 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
+ u32 old_val, value;
+
+ old_val = readl(ioaddr + MAC_CTRL_REG);
+ value = old_val;
if (enable)
value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
else
value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
+ if (value != old_val)
+ writel(value, ioaddr + MAC_CTRL_REG);
}
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
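[Editor's sketch] This hunk and the stmmac_main.c change that follows apply the same micro-optimization: keep the register's previous value from the read-modify-write and skip the writel() when nothing changed, avoiding needless bus cycles and any side effects of rewriting a live control register. In isolation, with hypothetical register and bit names:

u32 old_val = readl(ioaddr + CTRL_REG);         /* CTRL_REG: illustrative */
u32 val = old_val;

if (enable)
        val |= ENABLE_BITS;
else
        val &= ~ENABLE_BITS;

if (val != old_val)                             /* write back only on change */
        writel(val, ioaddr + CTRL_REG);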
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 070b5ef165eb..592d29abcb1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -986,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- u32 ctrl;
+ u32 old_ctrl, ctrl;
- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
- ctrl &= ~priv->hw->link.speed_mask;
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
@@ -1064,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (tx_pause && rx_pause)
stmmac_mac_flow_ctrl(priv, duplex);
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 76c4a709d73d..e97db826cdd4 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -348,7 +348,7 @@ do { \
* This macro is invoked by the OS-specific before it left the
* function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
* if the number of used RxDs is equal or lower than the
- * the given low water mark.
+ * given low water mark.
*
* para low_water low water mark of used RxD's
*
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 1e9eae208e44..53a1dbeaffa6 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -568,7 +568,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
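[Editor's sketch] The ipa_smem_init() fix keeps the unmasked address around so the size can be grown by the intra-page offset before page-aligning it; masking first, as the old code did, lost that offset. Worked example with 4 KiB pages: addr = 0x41234 and size = 0x100 give phys = 0x41000 and size = PAGE_ALIGN(0x100 + 0x234) = 0x1000, i.e. the mapping covers the whole page the buffer straddles. As arithmetic:

phys = addr & PAGE_MASK;                        /* round the base down */
size = PAGE_ALIGN(size + (addr - phys));        /* grow by the lost offset */
/* e.g. addr=0x41234, size=0x100 -> phys=0x41000, size=0x1000 */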
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..6f35438cda89 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -48,7 +48,7 @@ struct ipa;
*
* The offset of registers related to resource types is computed by a macro
* that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
+ * which is a member of the ipa_resource_type_src enumerated type for
* source endpoint resources or the ipa_resource_type_dst enumerated type
* for destination endpoint resources.
*
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index ef02f2cf5ce1..cbabca167a07 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@ out1:
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
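[Editor's sketch] The __init and __exit annotations place these functions in discardable sections: init code is freed once boot (or module load) finishes, and exit code is dropped entirely for built-in code or when module unloading is disabled. The canonical skeleton:

#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
        return 0;       /* memory reclaimed after initialization */
}
module_init(demo_init);

static void __exit demo_exit(void)
{
        /* discarded entirely when unloading is impossible */
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");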
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index ee6087e7b2bf..c6d271e5687e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -462,11 +462,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -3661,7 +3656,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
eth_hw_addr_set(dev, addr->sa_data);
- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@@ -4000,6 +3994,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0c6efd792690..12ff276b80ae 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,11 +316,11 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
- /* If we managed to get here with the PHY state machine in a state other
- * than PHY_HALTED this is an indication that something went wrong and
- * we should most likely be using MAC managed PM and we are not.
+	/* If we managed to get here with the PHY state machine in a state
+	 * neither PHY_HALTED nor PHY_READY, this is an indication that something
+	 * went wrong and we should most likely be using MAC managed PM but are not.
*/
- WARN_ON(phydev->state != PHY_HALTED && !phydev->mac_managed_pm);
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
ret = phy_init_hw(phydev);
if (ret < 0)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0f6efaabaa32..d142ac8fcf6e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5906,6 +5906,11 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -6431,21 +6436,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
- switch (tp->version) {
- case RTL_VER_10:
- case RTL_VER_11:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
- break;
- case RTL_VER_12:
- case RTL_VER_13:
- case RTL_VER_15:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
- break;
- default:
- break;
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
}
static void rtl8156_change_mtu(struct r8152 *tp)
@@ -6557,6 +6549,11 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d934774e9733..9cce7dec7366 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1211,7 +1211,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
if (!hdr_hash || !skb)
return;
- switch ((int)hdr_hash->hash_report) {
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
case VIRTIO_NET_HASH_REPORT_TCPv4:
case VIRTIO_NET_HASH_REPORT_UDPv4:
case VIRTIO_NET_HASH_REPORT_TCPv6:
@@ -1229,7 +1229,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
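[Editor's sketch] The virtio_net hash fields are little-endian on the wire; the old casts merely silenced sparse and produced byte-swapped values on big-endian hosts. Declaring the fields with __le types and converting at the point of use keeps sparse able to catch such bugs:

#include <linux/types.h>

struct wire_hash {                      /* illustrative, not the virtio ABI */
        __le32 hash_value;
        __le16 hash_report;
};

static void consume(const struct wire_hash *h)
{
        u16 report = __le16_to_cpu(h->hash_report);     /* no-op on LE hosts */
        u32 value = __le32_to_cpu(h->hash_value);

        use_hash(report, value);        /* hypothetical consumer */
}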
@@ -3432,29 +3432,6 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
(unsigned int)GOOD_PACKET_LEN);
}
-static void virtnet_config_sizes(struct virtnet_info *vi, u32 *sizes)
-{
- u32 i, rx_size, tx_size;
-
- if (vi->speed == SPEED_UNKNOWN || vi->speed < SPEED_10000) {
- rx_size = 1024;
- tx_size = 1024;
-
- } else if (vi->speed < SPEED_40000) {
- rx_size = 1024 * 4;
- tx_size = 1024 * 4;
-
- } else {
- rx_size = 1024 * 8;
- tx_size = 1024 * 8;
- }
-
- for (i = 0; i < vi->max_queue_pairs; i++) {
- sizes[rxq2vq(i)] = rx_size;
- sizes[txq2vq(i)] = tx_size;
- }
-}
-
static int virtnet_find_vqs(struct virtnet_info *vi)
{
vq_callback_t **callbacks;
@@ -3462,7 +3439,6 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
int ret = -ENOMEM;
int i, total_vqs;
const char **names;
- u32 *sizes;
bool *ctx;
/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
@@ -3490,15 +3466,10 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx = NULL;
}
- sizes = kmalloc_array(total_vqs, sizeof(*sizes), GFP_KERNEL);
- if (!sizes)
- goto err_sizes;
-
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
callbacks[total_vqs - 1] = NULL;
names[total_vqs - 1] = "control";
- sizes[total_vqs - 1] = 64;
}
/* Allocate/initialize parameters for send/receive virtqueues */
@@ -3513,10 +3484,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx[rxq2vq(i)] = true;
}
- virtnet_config_sizes(vi, sizes);
-
- ret = virtio_find_vqs_ctx_size(vi->vdev, total_vqs, vqs, callbacks,
- names, sizes, ctx, NULL);
+ ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
+ names, ctx, NULL);
if (ret)
goto err_find;
@@ -3536,8 +3505,6 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
err_find:
- kfree(sizes);
-err_sizes:
kfree(ctx);
err_ctx:
kfree(names);
@@ -3897,9 +3864,6 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->curr_queue_pairs = num_online_cpus();
vi->max_queue_pairs = max_queue_pairs;
- virtnet_init_settings(dev);
- virtnet_update_settings(vi);
-
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
@@ -3912,6 +3876,8 @@ static int virtnet_probe(struct virtio_device *vdev)
netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
+ virtnet_init_settings(dev);
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
vi->failover = net_failover_create(vi->dev);
if (IS_ERR(vi->failover)) {
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 2caf997f9bc9..07596bf5f7d6 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
pn53x_unregister_nfc(pn532->priv);
serdev_device_close(serdev);
pn53x_common_clean(pn532->priv);
+ del_timer_sync(&pn532->cmd_timeout);
kfree_skb(pn532->recv_skb);
kfree(pn532);
}
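[Editor's sketch] The pn532 fix is ordering on the remove path: the command timeout timer can still be pending (or its handler running) when remove is entered, and that handler touches state the tail of the function frees; del_timer_sync() waits out any in-flight handler before the frees, closing the use-after-free window. The general teardown shape, with a hypothetical context type:

static void my_remove(struct my_ctx *ctx)
{
        del_timer_sync(&ctx->cmd_timeout);      /* waits for a running handler */
        kfree_skb(ctx->recv_skb);               /* only now safe to free */
        kfree(ctx);
}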
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9be007c9420f..f223afe47d10 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -268,7 +268,7 @@ static int ioc_count;
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
-* This was was copied from sba_iommu.c. Don't try to unify
+* This was copied from sba_iommu.c. Don't try to unify
* the two resource managers unless a way to have different
* allocation policies is also adjusted. We'd like to avoid
* I/O TLB thrashing by having resource allocation policy
@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
}
}
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}
static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev)
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
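[Editor's sketch] The ccio hunks harden an __init path: the kmalloc() of the resource name could fail, and the old code passed the resulting NULL straight into snprintf(); now the helper returns -ENOMEM and ccio_probe() frees the ioc it had just allocated before bailing out. The unwind shape:

name = kmalloc(14, GFP_KERNEL);
if (!name)
        return -ENOMEM;
/* ... */
if (ccio_init_resources(ioc)) {
        kfree(ioc);             /* undo the earlier allocation */
        return -ENOMEM;
}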
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 1e4a5663d011..d4be9d2ee74d 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -646,7 +646,7 @@ int lcd_print( const char *str )
cancel_delayed_work_sync(&led_task);
/* copy display string to buffer for procfs */
- strlcpy(lcd_text, str, sizeof(lcd_text));
+ strscpy(lcd_text, str, sizeof(lcd_text));
/* Set LCD Cursor to 1st character */
gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
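[Editor's sketch] strscpy() is the preferred replacement for the deprecated strlcpy(): it never reads the source past the destination size and it reports truncation directly, returning the number of bytes copied or -E2BIG instead of strlcpy()'s would-be source length. Typical use:

char buf[16];
ssize_t n = strscpy(buf, input, sizeof(buf));

if (n == -E2BIG)
        pr_debug("input truncated to %zu bytes\n", sizeof(buf) - 1);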
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 342778782359..2c20b0de8cb0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -72,7 +72,7 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
-/**
+/*
* This is just a simple implementation to allow legacy implementations
* to be compatible with the new RISC-V PMU driver framework.
* This driver only allows reading two counters, i.e. CYCLE & INSTRET.
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 8be13d416f48..1ae3c56b66b0 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -928,7 +928,6 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 67feed25c9db..5362f1a7b77c 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -328,6 +328,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
+ { "CLSA0101", (unsigned long)&cs35l41_hda },
{ }
};
MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7150b1d0159e..d8373cb04f90 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4784,10 +4784,10 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- consumers[i].consumer = NULL;
ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
"Failed to get supply '%s'",
consumers[i].supply);
+ consumers[i].consumer = NULL;
goto err;
}
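
The reordering above matters because dev_err_probe() derives its return value from the ERR_PTR-encoded pointer; NULLing the field first would log and return PTR_ERR(NULL) == 0. A hedged sketch of the general rule (the demo_* names and getter are hypothetical):

#include <linux/err.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

struct demo {
	struct regulator *supply;
};

/* hypothetical getter that returns an ERR_PTR on failure */
static struct regulator *demo_alloc(struct device *dev);

static int demo_get_supply(struct device *dev, struct demo *d)
{
	d->supply = demo_alloc(dev);
	if (IS_ERR(d->supply)) {
		/* consume the encoded errno before overwriting it */
		int ret = dev_err_probe(dev, PTR_ERR(d->supply),
					"failed to get supply\n");

		/* then park a safe value for the unwind path */
		d->supply = NULL;
		return ret;
	}
	return 0;
}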
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 81c4f5776109..0f7706e23eb9 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -158,7 +158,6 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool * ctx,
struct irq_affinity *desc)
{
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8f1d1cf23d44..59ac98f2bd27 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -2086,6 +2086,9 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
+ if (!ap_qci_info) /* QCI not supported */
+ return false;
+
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
ap_fetch_qci_info(ap_qci_info);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0c40af157df2..0f17933954fb 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -148,12 +148,16 @@ struct ap_driver {
/*
* Called at the start of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_config_changed)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
/*
* Called at the end of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_scan_complete)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 896896e32664..a10dbe632ef9 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -637,7 +637,6 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a3e117a4b8e7..f6c37a97544e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -7153,22 +7153,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- goto fail;
+ return -ENOMEM;
break;
case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- goto fail;
+ return -ENOMEM;
break;
}
return 0;
- fail:
- kfree(instance->reply_map);
- instance->reply_map = NULL;
- return -ENOMEM;
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index e48d4261d0bc..09c5fe37754c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -5310,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
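
Both megaraid hunks remove frees of state the failing helper never allocated (reply_map is set up earlier in probe; ctrl_context is freed by the caller), which could double-free on the error path. A sketch of the ownership rule, with illustrative names:

#include <linux/slab.h>

struct demo_instance {
	void *reply_map;	/* owned by the caller/probe path */
	void *child;		/* owned by this helper */
};

static int demo_alloc_child(struct demo_instance *inst)
{
	inst->child = kzalloc(64, GFP_KERNEL);
	if (!inst->child)
		return -ENOMEM;	/* unwind nothing we didn't allocate */
	return 0;
}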
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2b2f68288375..62666df1a59e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -6935,14 +6935,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4dbd29ab1dcc..ef08029a0079 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -111,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -121,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(rq, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}
/**
@@ -651,14 +656,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
return bytes;
}
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
-{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
-}
-
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
struct request *req = scsi_cmd_to_rq(cmd);
@@ -676,14 +673,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
return false;
}
+/*
+ * When the ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep by
+ * 1 sec to avoid aggressive retries of the target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -772,8 +776,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_TRANSPORT;
- fallthrough;
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
@@ -832,7 +836,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
return;
fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -926,7 +933,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* command block will be released and the queue function will be goosed. If we
* are not done then we have to figure out what to do next:
*
- * a) We can call scsi_io_completion_reprep(). The request will be
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
* unprepared and put back on the queue. Then a new command will
* be created for it. This should be used if we made forward
* progress, or if we want to switch from READ(10) to READ(6) for
@@ -942,7 +949,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
@@ -979,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* request just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
@@ -1542,7 +1548,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
scsi_init_command(sdev, cmd);
cmd->eh_eflags = 0;
- cmd->allowed = 0;
cmd->prot_type = 0;
cmd->prot_flags = 0;
cmd->submitter = 0;
@@ -1593,6 +1598,8 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
return ret;
}
+ /* Usually overridden by the ULP */
+ cmd->allowed = 0;
memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
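
The new msecs parameter splits requeueing into the block layer's two-step idiom: blk_mq_requeue_request(rq, false) parks the request without kicking the requeue list, and blk_mq_delay_kick_requeue_list() schedules the kick later, so a target in the ALUA transitioning state is not hammered. A standalone sketch of the idiom:

#include <linux/blk-mq.h>

static void demo_requeue_after(struct request *rq, unsigned long msecs)
{
	if (msecs) {
		/* park the request, but don't run the queue yet */
		blk_mq_requeue_request(rq, false);
		/* kick the requeue list once the delay expires */
		blk_mq_delay_kick_requeue_list(rq->q, msecs);
	} else {
		blk_mq_requeue_request(rq, true);	/* immediate */
	}
}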
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8f79fa6318fe..eb76ba055021 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -103,7 +103,6 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
-static void sd_start_done_work(struct work_struct *work);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
@@ -3471,7 +3470,6 @@ static int sd_probe(struct device *dev)
sdkp->max_retries = SD_MAX_RETRIES;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);
- INIT_WORK(&sdkp->start_done_work, sd_start_done_work);
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
@@ -3594,69 +3592,12 @@ static void scsi_disk_release(struct device *dev)
kfree(sdkp);
}
-/* Process sense data after a START command finished. */
-static void sd_start_done_work(struct work_struct *work)
-{
- struct scsi_disk *sdkp = container_of(work, typeof(*sdkp),
- start_done_work);
- struct scsi_sense_hdr sshdr;
- int res = sdkp->start_result;
-
- if (res == 0)
- return;
-
- sd_print_result(sdkp, "Start/Stop Unit failed", res);
-
- if (res < 0)
- return;
-
- if (scsi_normalize_sense(sdkp->start_sense_buffer,
- sdkp->start_sense_len, &sshdr))
- sd_print_sense_hdr(sdkp, &sshdr);
-}
-
-/* A START command finished. May be called from interrupt context. */
-static void sd_start_done(struct request *req, blk_status_t status)
-{
- const struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
- struct scsi_disk *sdkp = scsi_disk(req->q->disk);
-
- sdkp->start_result = scmd->result;
- WARN_ON_ONCE(scmd->sense_len > SCSI_SENSE_BUFFERSIZE);
- sdkp->start_sense_len = scmd->sense_len;
- memcpy(sdkp->start_sense_buffer, scmd->sense_buffer,
- ARRAY_SIZE(sdkp->start_sense_buffer));
- WARN_ON_ONCE(!schedule_work(&sdkp->start_done_work));
-}
-
-/* Submit a START command asynchronously. */
-static int sd_submit_start(struct scsi_disk *sdkp, u8 cmd[], u8 cmd_len)
-{
- struct scsi_device *sdev = sdkp->device;
- struct request_queue *q = sdev->request_queue;
- struct request *req;
- struct scsi_cmnd *scmd;
-
- req = scsi_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- scmd = blk_mq_rq_to_pdu(req);
- scmd->cmd_len = cmd_len;
- memcpy(scmd->cmnd, cmd, cmd_len);
- scmd->allowed = sdkp->max_retries;
- req->timeout = SD_TIMEOUT;
- req->rq_flags |= RQF_PM | RQF_QUIET;
- req->end_io = sd_start_done;
- blk_execute_rq_nowait(req, /*at_head=*/true);
-
- return 0;
-}
-
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
+ struct scsi_sense_hdr sshdr;
struct scsi_device *sdp = sdkp->device;
+ int res;
if (start)
cmd[4] |= 1; /* START */
@@ -3667,10 +3608,23 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
- /* Wait until processing of sense data has finished. */
- flush_work(&sdkp->start_done_work);
+ res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
+ if (res) {
+ sd_print_result(sdkp, "Start/Stop Unit failed", res);
+ if (res > 0 && scsi_sense_valid(&sshdr)) {
+ sd_print_sense_hdr(sdkp, &sshdr);
+ /* 0x3a is medium not present */
+ if (sshdr.asc == 0x3a)
+ res = 0;
+ }
+ }
- return sd_submit_start(sdkp, cmd, sizeof(cmd));
+ /* SCSI error codes must not go to the generic layer */
+ if (res)
+ return -EIO;
+
+ return 0;
}
/*
@@ -3697,8 +3651,6 @@ static void sd_shutdown(struct device *dev)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
-
- flush_work(&sdkp->start_done_work);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
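
The reverted-to-synchronous sd_start_stop_device() relies on scsi_execute()'s result convention: 0 is success, a negative value is a kernel errno, and a positive value carries SCSI status/host/driver bytes, in which case valid sense data explains the failure. A sketch of a caller applying that convention (timeout and retry values here are assumptions, not sd.c's):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

static int demo_start_unit(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1 /* START */, 0 };
	struct scsi_sense_hdr sshdr;
	int res;

	res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			   30 * HZ, 3, 0, RQF_PM, NULL);
	if (res > 0 && scsi_sense_valid(&sshdr) &&
	    sshdr.asc == 0x3a)		/* medium not present */
		return 0;		/* treat as benign, like sd.c */

	/* SCSI-internal codes must not leak to generic callers */
	return res ? -EIO : 0;
}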
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index b89187761d61..5eea762f84d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -150,11 +150,6 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
-
- int start_result;
- u32 start_sense_len;
- u8 start_sense_buffer[SCSI_SENSE_BUFFERSIZE];
- struct work_struct start_done_work;
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fe000da11332..8ced292c4b96 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -2012,7 +2012,7 @@ static int storvsc_probe(struct hv_device *device,
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq) {
ret = -ENOMEM;
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 2be3afe6c2e3..dd5f2a13ceb5 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -469,6 +469,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
{
const struct meson_ee_pwrc_domain_data *match;
struct regmap *regmap_ao, *regmap_hhi;
+ struct device_node *parent_np;
struct meson_ee_pwrc *pwrc;
int i, ret;
@@ -495,7 +496,9 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
pwrc->xlate.num_domains = match->count;
- regmap_hhi = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ parent_np = of_get_parent(pdev->dev.of_node);
+ regmap_hhi = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_hhi)) {
dev_err(&pdev->dev, "failed to get HHI regmap\n");
return PTR_ERR(regmap_hhi);
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index b4615b288625..312fd9afccb0 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -273,6 +273,7 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
const struct meson_gx_pwrc_vpu *vpu_pd_match;
struct regmap *regmap_ao, *regmap_hhi;
struct meson_gx_pwrc_vpu *vpu_pd;
+ struct device_node *parent_np;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
@@ -291,7 +292,9 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
- regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ parent_np = of_get_parent(pdev->dev.of_node);
+ regmap_ao = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get regmap\n");
return PTR_ERR(regmap_ao);
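
Both amlogic hunks fix the same leak: of_get_parent() returns the parent node with an elevated refcount, which must be dropped with of_node_put() once syscon_node_to_regmap() is done with it. The pattern as a standalone sketch:

#include <linux/of.h>
#include <linux/mfd/syscon.h>

static struct regmap *demo_parent_regmap(struct device_node *np)
{
	struct device_node *parent = of_get_parent(np);
	struct regmap *map;

	map = syscon_node_to_regmap(parent);
	of_node_put(parent);	/* balance of_get_parent() */

	return map;		/* may be an ERR_PTR; caller checks */
}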
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index c50a6ce1b99d..f95a1337450d 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -44,6 +44,7 @@ config ARCH_RZG2L
bool
select PM
select PM_GENERIC_DOMAINS
+ select RENESAS_RZG2L_IRQC
config ARCH_RZN1
bool
@@ -332,6 +333,16 @@ config ARCH_R9A09G011
endif # ARM64
+if RISCV
+
+config ARCH_R9A07G043
+ bool "RISC-V Platform support for RZ/Five"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/Five SoC.
+
+endif # RISCV
+
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index d171f1b635c7..621ceaa047d4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -50,6 +50,10 @@ static const struct renesas_family fam_rza2 __initconst __maybe_unused = {
.name = "RZ/A2",
};
+static const struct renesas_family fam_rzfive __initconst __maybe_unused = {
+ .name = "RZ/Five",
+};
+
static const struct renesas_family fam_rzg1 __initconst __maybe_unused = {
.name = "RZ/G1",
.reg = 0xff000044, /* PRR (Product Register) */
@@ -102,6 +106,11 @@ static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
.id = 0x40,
};
+static const struct renesas_soc soc_rz_five __initconst __maybe_unused = {
+ .family = &fam_rzfive,
+ .id = 0x847c447,
+};
+
static const struct renesas_soc soc_rz_g1h __initconst __maybe_unused = {
.family = &fam_rzg1,
.id = 0x45,
@@ -320,6 +329,7 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a779m0", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m1", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m8", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779mb", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
@@ -358,8 +368,12 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
#endif
#if defined(CONFIG_ARCH_R9A07G043)
+#ifdef CONFIG_RISCV
+ { .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
+#else
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_g2ul },
#endif
+#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 0bc7daa7afc8..e4cb52e1fe26 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -156,6 +156,7 @@ struct meson_spicc_device {
void __iomem *base;
struct clk *core;
struct clk *pclk;
+ struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
@@ -168,6 +169,8 @@ struct meson_spicc_device {
unsigned long xfer_remain;
};
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
- u32 conf = 0;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
- /* Default Clock rate core/4 */
-
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
return 0;
}
@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
*/
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
- struct clk_divider *pow2_div, *enh_div;
- struct clk_mux *mux;
+ struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
- if (!pow2_div)
- return -ENOMEM;
-
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- pow2_div->shift = 16,
- pow2_div->width = 3,
- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
- pow2_div->reg = spicc->base + SPICC_CONREG;
- pow2_div->hw.init = &init;
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
- clk = devm_clk_register(dev, &pow2_div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
- if (!spicc->data->has_enhance_clk_div) {
- spicc->clk = clk;
- return 0;
- }
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
- parent_data[0].hw = &pow2_div->hw;
+ parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
meson_spicc_oen_enable(spicc);
- ret = meson_spicc_clk_init(spicc);
+ ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
- dev_err(&pdev->dev, "clock registration failed\n");
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8f97a3eacdea..83da8862b8f2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -95,7 +95,7 @@ static ssize_t driver_override_show(struct device *dev,
}
static DEVICE_ATTR_RW(driver_override);
-static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev)
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
struct spi_statistics __percpu *pcpu_stats;
@@ -162,7 +162,7 @@ static struct device_attribute dev_attr_spi_device_##field = { \
}
#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
ssize_t len; \
@@ -309,7 +309,7 @@ static const struct attribute_group *spi_master_groups[] = {
NULL,
};
-static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats,
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
struct spi_transfer *xfer,
struct spi_controller *ctlr)
{
@@ -1275,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
@@ -1432,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
spi_set_cs(msg->spi, true, false);
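
The __percpu annotations above are purely for sparse: they mark the pointer as a per-cpu offset rather than a normal address, so direct dereferences get flagged and access has to go through the per-cpu accessors. Minimal sketch of the convention:

#include <linux/percpu.h>

struct demo_stats {
	u64 transfers;
};

static struct demo_stats __percpu *demo_alloc_stats(void)
{
	return alloc_percpu(struct demo_stats);
}

static void demo_account(struct demo_stats __percpu *stats)
{
	/* not stats->transfers++; per-cpu data needs the accessors */
	this_cpu_inc(stats->transfers);
}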
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index f2b1bcefcadd..1175f3a46859 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -326,6 +326,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
void *ret;
int id;
+ if (!access_ok((void __user *)addr, length))
+ return ERR_PTR(-EFAULT);
+
mutex_lock(&teedev->mutex);
id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
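
The added access_ok() call rejects kernel addresses (and wrapping ranges) before the shm registration goes on to pin pages, closing a hole where a crafted address could be treated as user memory. The guard in isolation:

#include <linux/uaccess.h>

/* Illustrative helper, not the tee core API. */
static int demo_check_user_range(unsigned long addr, size_t length)
{
	if (!access_ok((void __user *)addr, length))
		return -EFAULT;		/* not a plausible user range */
	return 0;
}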
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 80d4e0676083..365489bf4b8c 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
- if (!priv->data_vault)
+ if (ZERO_OR_NULL_PTR(priv->data_vault))
goto out_free;
bin_attr_data_vault.private = priv->data_vault;
@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
goto free_imok;
}
- if (priv->data_vault) {
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
if (result)
@@ -615,7 +615,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_sysfs:
cleanup_odvp(priv);
if (priv->data_vault) {
- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
kfree(priv->data_vault);
}
free_uuid:
@@ -647,7 +648,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
- if (priv->data_vault)
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
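
ZERO_OR_NULL_PTR() is needed above because kmemdup() of a zero-length GDDV buffer returns ZERO_SIZE_PTR, a non-NULL magic cookie, so a plain NULL test would accept an empty vault as valid data. Sketch of the check (illustrative helper):

#include <linux/slab.h>

static void *demo_dup(const void *src, size_t len)
{
	void *p = kmemdup(src, len, GFP_KERNEL);

	/* catches both allocation failure and a zero-length dup */
	if (ZERO_OR_NULL_PTR(p))
		return NULL;

	return p;
}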
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6a5d0ae5d7a4..50d50cec7774 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1329,6 +1329,7 @@ free_tz:
kfree(tz);
return ERR_PTR(result);
}
+EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
void *devdata, struct thermal_zone_device_ops *ops,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 6bc679d22927..a202d7d5240d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8741,6 +8741,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
+ unsigned long deadline;
+ int32_t remaining;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -8773,9 +8775,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks, hence set the RQF_PM flag so that it doesn't resume the
* already suspended children.
*/
+ deadline = jiffies + 10 * HZ;
for (retries = 3; retries > 0; --retries) {
+ ret = -ETIMEDOUT;
+ remaining = deadline - jiffies;
+ if (remaining <= 0)
+ break;
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ remaining / HZ, 0, 0, RQF_PM, NULL);
if (!scsi_status_is_check_condition(ret) ||
!scsi_sense_valid(&sshdr) ||
sshdr.sense_key != UNIT_ATTENTION)
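
The ufshcd hunk bounds the SSU retries by wall-clock time rather than count alone: a 10-second budget is set once, and each attempt gets only the remaining slice, so three slow attempts cannot stretch a suspend indefinitely. Generic sketch of the loop (the attempt callback and budget are assumptions):

#include <linux/jiffies.h>
#include <linux/errno.h>

static int demo_retry_with_deadline(int (*attempt)(long timeout_s))
{
	unsigned long deadline = jiffies + 10 * HZ;	/* total budget */
	int ret = -ETIMEDOUT;
	int retries;

	for (retries = 3; retries > 0; --retries) {
		long remaining = (long)(deadline - jiffies);

		if (remaining <= 0)
			return -ETIMEDOUT;	/* budget exhausted */

		ret = attempt(remaining / HZ);	/* shrink per-try timeout */
		if (!ret)
			break;
	}
	return ret;
}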
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index eced97538082..c3628a8645a5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1711,7 +1711,7 @@ static struct exynos_ufs_uic_attr fsd_uic_attr = {
.pa_dbg_option_suite = 0x2E820183,
};
-struct exynos_ufs_drv_data fsd_ufs_drvs = {
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.uic_attr = &fsd_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index bd4dc97d4d34..db568f67e4dc 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -290,7 +290,7 @@ static char default_sti_path[21] __read_mostly;
static int __init sti_setup(char *str)
{
if (str)
- strlcpy (default_sti_path, str, sizeof (default_sti_path));
+ strscpy(default_sti_path, str, sizeof(default_sti_path));
return 1;
}
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a3e6faed7745..14eb718bd67c 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3891,7 +3891,7 @@ static int __init atyfb_setup(char *options)
&& (!strncmp(this_opt, "Mach64:", 7))) {
static unsigned char m64_num;
static char mach64_str[80];
- strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
+ strscpy(mach64_str, this_opt + 7, sizeof(mach64_str));
if (!store_video_par(mach64_str, m64_num)) {
m64_num++;
mach64_count = m64_num;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 6851f47613e1..a14a8d73035c 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1980,7 +1980,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
info->screen_base = rinfo->fb_base;
info->screen_size = rinfo->mapped_vram;
/* Fill fix common fields */
- strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
+ strscpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
info->fix.smem_start = rinfo->fb_base_phys;
info->fix.smem_len = rinfo->video_ram;
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -2094,34 +2094,34 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
u32 tmp;
/* framebuffer size */
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
(rinfo->family == CHIP_FAMILY_RS200) ||
(rinfo->family == CHIP_FAMILY_RS300) ||
(rinfo->family == CHIP_FAMILY_RC410) ||
(rinfo->family == CHIP_FAMILY_RS400) ||
(rinfo->family == CHIP_FAMILY_RS480) ) {
- u32 tom = INREG(NB_TOM);
- tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
-
- radeon_fifo_wait(6);
- OUTREG(MC_FB_LOCATION, tom);
- OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
-
- /* This is supposed to fix the crtc2 noise problem. */
- OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
-
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
- (rinfo->family == CHIP_FAMILY_RS200)) {
- /* This is to workaround the asic bug for RMX, some versions
- of BIOS doesn't have this register initialized correctly.
- */
- OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
- ~CRTC_H_CUTOFF_ACTIVE_EN);
- }
- } else {
- tmp = INREG(CNFG_MEMSIZE);
+ u32 tom = INREG(NB_TOM);
+
+ tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+ radeon_fifo_wait(6);
+ OUTREG(MC_FB_LOCATION, tom);
+ OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
+
+ /* This is supposed to fix the crtc2 noise problem. */
+ OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
+
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ (rinfo->family == CHIP_FAMILY_RS200)) {
+ /* This is to work around the ASIC bug for RMX; some versions
+ * of the BIOS don't have this register initialized correctly.
+ */
+ OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
+ ~CRTC_H_CUTOFF_ACTIVE_EN);
+ }
+ } else {
+ tmp = INREG(CNFG_MEMSIZE);
}
/* mem size is bits [28:0], mask off the rest */
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index e7702fe1fe7d..6403ae07970d 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -182,7 +182,7 @@ static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void bw2_init_fix(struct fb_info *info, int linebytes)
{
- strlcpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
+ strscpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_MONO01;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 393894af26f8..2b00a9d554fc 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index a41a75841e10..2a9fa06881b5 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1999,7 +1999,7 @@ static int cirrusfb_set_fbinfo(struct fb_info *info)
}
/* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ strscpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
sizeof(info->fix.id));
/* monochrome: only 1 memory plane */
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 771ce1f76951..a1061c2f1640 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -326,7 +326,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
info->var.vmode = FB_VMODE_NONINTERLACED;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.accel = FB_ACCEL_NONE;
- strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
fb_videomode_to_var(&info->var, &cfb->mode);
ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index cf9ac4da0a82..098b62f7b701 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -412,7 +412,7 @@ static int __init fb_console_setup(char *this_opt)
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5)) {
- strlcpy(fontname, options + 5, sizeof(fontname));
+ strscpy(fontname, options + 5, sizeof(fontname));
continue;
}
@@ -2401,15 +2401,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
- int resize;
+ int resize, ret, old_userfont, old_width, old_height, old_charcount;
char *old_data = NULL;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
+ old_userfont = p->userfont;
if ((p->userfont = userfont))
REFCOUNT(data)++;
+
+ old_width = vc->vc_font.width;
+ old_height = vc->vc_font.height;
+ old_charcount = vc->vc_font.charcount;
+
vc->vc_font.width = w;
vc->vc_font.height = h;
vc->vc_font.charcount = charcount;
@@ -2425,7 +2431,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
cols /= w;
rows /= h;
- vc_resize(vc, cols, rows);
+ ret = vc_resize(vc, cols, rows);
+ if (ret)
+ goto err_out;
} else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
@@ -2435,6 +2443,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
if (old_data && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
+
+err_out:
+ p->fontdata = old_data;
+ vc->vc_font.data = (void *)old_data;
+
+ if (userfont) {
+ p->userfont = old_userfont;
+ REFCOUNT(data)--;
+ }
+
+ vc->vc_font.width = old_width;
+ vc->vc_font.height = old_height;
+ vc->vc_font.charcount = old_charcount;
+
+ return ret;
}
/*
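
fbcon_do_set_font() previously ignored a vc_resize() failure and left the console pointing at freed font data. The fix snapshots every piece of font state before the fallible call and restores it on error. The shape of that pattern, reduced to a sketch with hypothetical names:

struct demo_state { int w, h, count; };

struct demo {
	struct demo_state cur;
};

static int demo_commit(struct demo *d);	/* hypothetical fallible step */

static int demo_set_state(struct demo *d, struct demo_state new)
{
	struct demo_state old = d->cur;		/* snapshot for rollback */
	int ret;

	d->cur = new;
	ret = demo_commit(d);
	if (ret)
		d->cur = old;			/* undo everything on failure */

	return ret;
}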
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index c2a60b187467..4d7f63892dcc 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
if (WARN_ON(refcount_read(&info->count)))
return;
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+ mutex_destroy(&info->bl_curve_mutex);
+#endif
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index d45355b9a58c..8f041f9b14c7 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1134,7 +1134,7 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx)
info->fb_size = int_cfb_info->fb.fix.smem_len;
info->info = int_cfb_info;
- strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
+ strscpy(info->dev_name, int_cfb_info->fb.fix.id,
sizeof(info->dev_name));
}
@@ -1229,7 +1229,7 @@ static int cyber2000fb_ddc_getsda(void *data)
static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
- strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
cfb->ddc_adapter.owner = THIS_MODULE;
cfb->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1304,7 +1304,7 @@ static int cyber2000fb_i2c_getscl(void *data)
static int cyber2000fb_i2c_register(struct cfb_info *cfb)
{
- strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
sizeof(cfb->i2c_adapter.name));
cfb->i2c_adapter.owner = THIS_MODULE;
cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
@@ -1500,7 +1500,7 @@ static int cyber2000fb_setup(char *options)
if (strncmp(opt, "font:", 5) == 0) {
static char default_font_storage[40];
- strlcpy(default_font_storage, opt + 5,
+ strscpy(default_font_storage, opt + 5,
sizeof(default_font_storage));
default_font = default_font_storage;
continue;
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index b3d580e57221..7cba3969a970 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -883,7 +883,7 @@ static void ffb_init_fix(struct fb_info *info)
} else
ffb_type_name = "Elite 3D";
- strlcpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 5d34d89fb665..e41204ecb0e3 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -410,13 +410,13 @@ static void __init gx1fb_setup(char *options)
continue;
if (!strncmp(this_opt, "mode:", 5))
- strlcpy(mode_option, this_opt + 5, sizeof(mode_option));
+ strscpy(mode_option, this_opt + 5, sizeof(mode_option));
else if (!strncmp(this_opt, "crt:", 4))
crt_option = !!simple_strtoul(this_opt + 4, NULL, 0);
else if (!strncmp(this_opt, "panel:", 6))
- strlcpy(panel_option, this_opt + 6, sizeof(panel_option));
+ strscpy(panel_option, this_opt + 6, sizeof(panel_option));
else
- strlcpy(mode_option, this_opt, sizeof(mode_option));
+ strscpy(mode_option, this_opt, sizeof(mode_option));
}
}
#endif
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index e5475ae1e158..94588b809ebf 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -650,7 +650,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cardtype = ent->driver_data;
par->refclk_ps = cardinfo[cardtype].refclk_ps;
info->fix = gxt4500_fix;
- strlcpy(info->fix.id, cardinfo[cardtype].cardname,
+ strscpy(info->fix.id, cardinfo[cardtype].cardname,
sizeof(info->fix.id));
info->pseudo_palette = par->pseudo_palette;
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 7f09a0daaaa2..bd30d8314b68 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -159,7 +159,7 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
{
struct i740fb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index d97d7456d15a..94f3bc637fc8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -681,7 +681,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
fbi->devtype = pdev->id_entry->driver_data;
- strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 236521b19daf..68bba2688f4c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2383,9 +2383,9 @@ static int __init matroxfb_setup(char *options) {
else if (!strncmp(this_opt, "mem:", 4))
mem = simple_strtoul(this_opt+4, NULL, 0);
else if (!strncmp(this_opt, "mode:", 5))
- strlcpy(videomode, this_opt+5, sizeof(videomode));
+ strscpy(videomode, this_opt + 5, sizeof(videomode));
else if (!strncmp(this_opt, "outputs:", 8))
- strlcpy(outputs, this_opt+8, sizeof(outputs));
+ strscpy(outputs, this_opt + 8, sizeof(outputs));
else if (!strncmp(this_opt, "dfp:", 4)) {
dfp_type = simple_strtoul(this_opt+4, NULL, 0);
dfp = 1;
@@ -2455,7 +2455,7 @@ static int __init matroxfb_setup(char *options) {
else if (!strcmp(this_opt, "dfp"))
dfp = value;
else {
- strlcpy(videomode, this_opt, sizeof(videomode));
+ strscpy(videomode, this_opt, sizeof(videomode));
}
}
}
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index dfb4ddc45701..17cda5765683 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1642,15 +1642,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
fbdev->int_irq = platform_get_irq(pdev, 0);
- if (!fbdev->int_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->int_irq < 0) {
r = ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
- if (!fbdev->ext_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->ext_irq < 0) {
r = ENXIO;
goto cleanup;
}
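
The omapfb fix replaces a never-true "!irq" test: platform_get_irq() returns the IRQ number or a negative errno (and already logs failures itself), so the idiomatic check is "< 0". In isolation, with a hypothetical setup helper:

#include <linux/platform_device.h>

static int demo_setup_irq(struct platform_device *pdev, int irq);	/* hypothetical */

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* negative errno; no extra dev_err() needed */

	/* irq is now a valid, non-zero interrupt number */
	return demo_setup_irq(pdev, irq);
}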
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index afa688e754b9..5ccddcfce722 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1331,7 +1331,7 @@ static void clear_fb_info(struct fb_info *fbi)
{
memset(&fbi->var, 0, sizeof(fbi->var));
memset(&fbi->fix, 0, sizeof(fbi->fix));
- strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+ strscpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
}
static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index d3be2c64f1c0..8fd79deb1e2a 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -617,6 +617,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
}
+ if (!var->pixclock) {
+ DPRINTK("pixclock is zero\n");
+ return -EINVAL;
+ }
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
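
The pm2fb guard prevents a divide-by-zero: var->pixclock is a user-controllable picosecond period and PICOS2KHZ() divides by it, so a zero value has to be rejected in check_var() before any conversion. Reduced sketch (the max-rate constant is an assumption, not pm2fb's):

#include <linux/fb.h>
#include <linux/errno.h>

static int demo_check_pixclock(const struct fb_var_screeninfo *var)
{
	if (!var->pixclock)
		return -EINVAL;		/* PICOS2KHZ() would divide by 0 */

	if (PICOS2KHZ(var->pixclock) > 230000 /* kHz, assumed limit */)
		return -EINVAL;

	return 0;
}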
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index e943300d23e8..d5d0bbd39213 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -640,7 +640,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
info->node = -1;
- strlcpy(info->fix.id, mi->id, 16);
+ strscpy(info->fix.id, mi->id, 16);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
info->fix.xpanstep = 0;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 66cfc3e9d3cf..696ac5431180 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2042,7 +2042,7 @@ static int __init pxafb_setup_options(void)
return -ENODEV;
if (options)
- strlcpy(g_options, options, sizeof(g_options));
+ strscpy(g_options, options, sizeof(g_options));
return 0;
}
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 5069f6f67923..67b63a753cb2 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -248,7 +248,7 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
{
struct s3fb_info *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index cf2a90ecd64e..e770b4a356b5 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -355,7 +355,7 @@ static int simplefb_regulators_get(struct simplefb_par *par,
if (!p || p == prop->name)
continue;
- strlcpy(name, prop->name,
+ strscpy(name, prop->name,
strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1);
regulator = devm_regulator_get_optional(&pdev->dev, name);
if (IS_ERR(regulator)) {
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index f28fd69d5eb7..c9e77429dfa3 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -649,37 +649,37 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
u16 xres=0, yres, myres;
#ifdef CONFIG_FB_SIS_300
- if(ivideo->sisvga_engine == SIS_300_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS300))
+ if (ivideo->sisvga_engine == SIS_300_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS300))
return -1 ;
}
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS315))
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS315))
return -1;
}
#endif
myres = sisbios_mode[myindex].yres;
- switch(vbflags & VB_DISPTYPE_DISP2) {
+ switch (vbflags & VB_DISPTYPE_DISP2) {
case CRT2_LCD:
xres = ivideo->lcdxres; yres = ivideo->lcdyres;
- if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
- (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
- if(sisbios_mode[myindex].xres > xres)
+ if ((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
+ (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
+ if (sisbios_mode[myindex].xres > xres)
return -1;
- if(myres > yres)
+ if (myres > yres)
return -1;
}
- if(ivideo->sisfb_fstn) {
- if(sisbios_mode[myindex].xres == 320) {
- if(myres == 240) {
- switch(sisbios_mode[myindex].mode_no[1]) {
+ if (ivideo->sisfb_fstn) {
+ if (sisbios_mode[myindex].xres == 320) {
+ if (myres == 240) {
+ switch (sisbios_mode[myindex].mode_no[1]) {
case 0x50: myindex = MODE_FSTN_8; break;
case 0x56: myindex = MODE_FSTN_16; break;
case 0x53: return -1;
@@ -688,7 +688,7 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
}
}
- if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn,
ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) {
return -1;
@@ -696,14 +696,14 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
break;
case CRT2_TV:
- if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
break;
case CRT2_VGA:
- if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
@@ -1872,7 +1872,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
+ strscpy(fix->id, ivideo->myid, sizeof(fix->id));
mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
@@ -2204,82 +2204,88 @@ static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
static void sisfb_sense_crt1(struct sis_video_info *ivideo)
{
- bool mustwait = false;
- u8 sr1F, cr17;
+ bool mustwait = false;
+ u8 sr1F, cr17;
#ifdef CONFIG_FB_SIS_315
- u8 cr63=0;
+ u8 cr63 = 0;
#endif
- u16 temp = 0xffff;
- int i;
+ u16 temp = 0xffff;
+ int i;
+
+ sr1F = SiS_GetReg(SISSR, 0x1F);
+ SiS_SetRegOR(SISSR, 0x1F, 0x04);
+ SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- sr1F = SiS_GetReg(SISSR, 0x1F);
- SiS_SetRegOR(SISSR, 0x1F, 0x04);
- SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- if(sr1F & 0xc0) mustwait = true;
+ if (sr1F & 0xc0)
+ mustwait = true;
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
- cr63 &= 0x40;
- SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
+ cr63 &= 0x40;
+ SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
+ }
#endif
- cr17 = SiS_GetReg(SISCR, 0x17);
- cr17 &= 0x80;
- if(!cr17) {
- SiS_SetRegOR(SISCR, 0x17, 0x80);
- mustwait = true;
- SiS_SetReg(SISSR, 0x00, 0x01);
- SiS_SetReg(SISSR, 0x00, 0x03);
- }
+ cr17 = SiS_GetReg(SISCR, 0x17);
+ cr17 &= 0x80;
- if(mustwait) {
- for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo);
- }
+ if (!cr17) {
+ SiS_SetRegOR(SISCR, 0x17, 0x80);
+ mustwait = true;
+ SiS_SetReg(SISSR, 0x00, 0x01);
+ SiS_SetReg(SISSR, 0x00, 0x03);
+ }
+ if (mustwait) {
+ for (i = 0; i < 10; i++)
+ sisfbwaitretracecrt1(ivideo);
+ }
#ifdef CONFIG_FB_SIS_315
- if(ivideo->chip >= SIS_330) {
- SiS_SetRegAND(SISCR, 0x32, ~0x20);
- if(ivideo->chip >= SIS_340) {
- SiS_SetReg(SISCR, 0x57, 0x4a);
- } else {
- SiS_SetReg(SISCR, 0x57, 0x5f);
- }
- SiS_SetRegOR(SISCR, 0x53, 0x02);
- while ((SiS_GetRegByte(SISINPSTAT)) & 0x01) break;
- while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01)) break;
- if ((SiS_GetRegByte(SISMISCW)) & 0x10) temp = 1;
- SiS_SetRegAND(SISCR, 0x53, 0xfd);
- SiS_SetRegAND(SISCR, 0x57, 0x00);
- }
+ if (ivideo->chip >= SIS_330) {
+ SiS_SetRegAND(SISCR, 0x32, ~0x20);
+ if (ivideo->chip >= SIS_340)
+ SiS_SetReg(SISCR, 0x57, 0x4a);
+ else
+ SiS_SetReg(SISCR, 0x57, 0x5f);
+
+ SiS_SetRegOR(SISCR, 0x53, 0x02);
+ while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)
+ break;
+ while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01))
+ break;
+ if ((SiS_GetRegByte(SISMISCW)) & 0x10)
+ temp = 1;
+
+ SiS_SetRegAND(SISCR, 0x53, 0xfd);
+ SiS_SetRegAND(SISCR, 0x57, 0x00);
+ }
#endif
- if(temp == 0xffff) {
- i = 3;
- do {
- temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
- ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
- } while(((temp == 0) || (temp == 0xffff)) && i--);
+ if (temp == 0xffff) {
+ i = 3;
- if((temp == 0) || (temp == 0xffff)) {
- if(sisfb_test_DDC1(ivideo)) temp = 1;
- }
- }
+ do {
+ temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
+ ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
+ } while (((temp == 0) || (temp == 0xffff)) && i--);
- if((temp) && (temp != 0xffff)) {
- SiS_SetRegOR(SISCR, 0x32, 0x20);
- }
+ if ((temp == 0) || (temp == 0xffff)) {
+ if (sisfb_test_DDC1(ivideo))
+ temp = 1;
+ }
+ }
+
+ if ((temp) && (temp != 0xffff))
+ SiS_SetRegOR(SISCR, 0x32, 0x20);
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA)
+ SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
#endif
- SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
-
- SiS_SetReg(SISSR, 0x1F, sr1F);
+ SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
+ SiS_SetReg(SISSR, 0x1F, sr1F);
}
/* Determine and detect attached devices on SiS30x */
@@ -2293,25 +2299,25 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
ivideo->SiS_Pr.PanelSelfDetected = false;
/* LCD detection only for TMDS bridges */
- if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
+ if (!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
return;
- if(ivideo->vbflags2 & VB2_30xBDH)
+ if (ivideo->vbflags2 & VB2_30xBDH)
return;
/* If LCD already set up by BIOS, skip it */
reg = SiS_GetReg(SISCR, 0x32);
- if(reg & 0x08)
+ if (reg & 0x08)
return;
realcrtno = 1;
- if(ivideo->SiS_Pr.DDCPortMixup)
+ if (ivideo->SiS_Pr.DDCPortMixup)
realcrtno = 0;
/* Check DDC capabilities */
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
realcrtno, 0, &buffer[0], ivideo->vbflags2);
- if((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
+ if ((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
return;
/* Read DDC data */
@@ -2320,17 +2326,17 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
ivideo->sisvga_engine, realcrtno, 1,
&buffer[0], ivideo->vbflags2);
- } while((temp) && i--);
+ } while ((temp) && i--);
- if(temp)
+ if (temp)
return;
/* No digital device */
- if(!(buffer[0x14] & 0x80))
+ if (!(buffer[0x14] & 0x80))
return;
/* First detailed timing preferred timing? */
- if(!(buffer[0x18] & 0x02))
+ if (!(buffer[0x18] & 0x02))
return;
xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);
@@ -2338,26 +2344,26 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
switch(xres) {
case 1024:
- if(yres == 768)
+ if (yres == 768)
paneltype = 0x02;
break;
case 1280:
- if(yres == 1024)
+ if (yres == 1024)
paneltype = 0x03;
break;
case 1600:
- if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
+ if ((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
paneltype = 0x0b;
break;
}
- if(!paneltype)
+ if (!paneltype)
return;
- if(buffer[0x23])
+ if (buffer[0x23])
cr37 |= 0x10;
- if((buffer[0x47] & 0x18) == 0x18)
+ if ((buffer[0x47] & 0x18) == 0x18)
cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20);
else
cr37 |= 0xc0;
@@ -2372,31 +2378,34 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
{
- int temp, mytest, result, i, j;
-
- for(j = 0; j < 10; j++) {
- result = 0;
- for(i = 0; i < 3; i++) {
- mytest = test;
- SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
- temp = (type >> 8) | (mytest & 0x00ff);
- SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
- mytest >>= 8;
- mytest &= 0x7f;
- temp = SiS_GetReg(SISPART4, 0x03);
- temp ^= 0x0e;
- temp &= mytest;
- if(temp == mytest) result++;
+ int temp, mytest, result, i, j;
+
+ for (j = 0; j < 10; j++) {
+ result = 0;
+ for (i = 0; i < 3; i++) {
+ mytest = test;
+ SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
+ temp = (type >> 8) | (mytest & 0x00ff);
+ SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
+ mytest >>= 8;
+ mytest &= 0x7f;
+ temp = SiS_GetReg(SISPART4, 0x03);
+ temp ^= 0x0e;
+ temp &= mytest;
+ if (temp == mytest)
+ result++;
#if 1
- SiS_SetReg(SISPART4, 0x11, 0x00);
- SiS_SetRegAND(SISPART4, 0x10, 0xe0);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
+ SiS_SetReg(SISPART4, 0x11, 0x00);
+ SiS_SetRegAND(SISPART4, 0x10, 0xe0);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
#endif
- }
- if((result == 0) || (result >= 2)) break;
- }
- return result;
+ }
+
+ if ((result == 0) || (result >= 2))
+ break;
+ }
+ return result;
}
static void SiS_Sense30x(struct sis_video_info *ivideo)
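SISDoSense() above is a debounce-by-majority probe: each of up to ten outer passes drives the sense pattern onto the bridge, samples the Part4 status register three times, and only trusts a pass whose three samples agree either way (result == 0 or result >= 2). A minimal sketch of that pattern, assuming a hypothetical sense_once() helper in place of the SISPART4 register sequence:

#include <linux/types.h>

/* Sketch only: sense_once() stands in for the SiS_SetReg/SiS_GetReg
 * dance on SISPART4; it returns true when the line reads back set. */
static int sense_majority(bool (*sense_once)(void))
{
	int i, j, result = 0;

	for (j = 0; j < 10; j++) {	/* up to ten passes */
		result = 0;
		for (i = 0; i < 3; i++)	/* three samples per pass */
			if (sense_once())
				result++;
		/* accept a unanimous miss or a 2-of-3 hit */
		if (result == 0 || result >= 2)
			break;
	}
	return result;
}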
@@ -4262,18 +4271,17 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
- for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
-
+ for (k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
- if(RankCapacity != PseudoRankCapacity)
+ if (RankCapacity != PseudoRankCapacity)
continue;
- if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
+ if ((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
continue;
BankNumHigh = RankCapacity * 16 * iteration - 1;
- if(iteration == 3) { /* Rank No */
+ if (iteration == 3) { /* Rank No */
BankNumMid = RankCapacity * 16 - 1;
} else {
BankNumMid = RankCapacity * 16 * iteration / 2 - 1;
@@ -4287,18 +4295,22 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
SiS_SetRegAND(SISSR, 0x15, 0xFB); /* Test */
SiS_SetRegOR(SISSR, 0x15, 0x04); /* Test */
sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;
- if(buswidth == 4) sr14 |= 0x80;
- else if(buswidth == 2) sr14 |= 0x40;
+
+ if (buswidth == 4)
+ sr14 |= 0x80;
+ else if (buswidth == 2)
+ sr14 |= 0x40;
+
SiS_SetReg(SISSR, 0x13, SiS_DRAMType[k][4]);
SiS_SetReg(SISSR, 0x14, sr14);
BankNumHigh <<= 16;
BankNumMid <<= 16;
- if((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
- (BankNumMid + PhysicalAdrHigh >= mapsize) ||
- (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
- (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
+ if ((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
+ (BankNumMid + PhysicalAdrHigh >= mapsize) ||
+ (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
+ (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
continue;
/* Write data */
@@ -4312,7 +4324,7 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
(FBAddr + BankNumHigh + PhysicalAdrOtherPage));
/* Read data */
- if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
+ if (readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
return 1;
}
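sisfb_post_300_rwtest() above identifies the installed DRAM geometry by programming SR13/SR14 for each candidate layout, writing 16-bit markers at addresses that only land correctly when that layout matches the chips, and reading one back. A hedged sketch of the core write/readback probe, with fb standing for the ioremapped framebuffer base and addr/pattern as placeholders:

#include <linux/io.h>

/* Sketch only: the real test writes several aliasing addresses
 * (half page, other page, high bank) before trusting a readback. */
static bool probe_pattern(void __iomem *fb, unsigned long addr, u16 pattern)
{
	writew(pattern, fb + addr);
	return readw(fb + addr) == pattern; /* survives => geometry fits */
}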
@@ -5867,7 +5879,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ivideo->cardnumber++;
}
- strlcpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
+ strscpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
ivideo->warncount = 0;
ivideo->chip_id = pdev->device;
@@ -6150,24 +6162,20 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
int result = 1;
- /* if((ivideo->chip == SIS_315H) ||
- (ivideo->chip == SIS_315) ||
- (ivideo->chip == SIS_315PRO) ||
- (ivideo->chip == SIS_330)) {
- sisfb_post_sis315330(pdev);
- } else */ if(ivideo->chip == XGI_20) {
+
+ if (ivideo->chip == XGI_20) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
- } else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
+ } else if ((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
} else {
printk(KERN_INFO "sisfb: Card is not "
"POSTed and sisfb can't do this either.\n");
}
- if(!result) {
+ if (!result) {
printk(KERN_ERR "sisfb: Failed to POST card\n");
ret = -ENODEV;
goto error_3;
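The strlcpy() -> strscpy() conversions here and in the fbdev hunks below are behaviour-preserving for these callers: both functions NUL-terminate and truncate to the destination size. The differences are the return value and how much of the source is read; strscpy() stops at the first NUL and reports truncation instead of the full source length. A minimal sketch of that difference, assuming a 4-byte destination:

char buf[4];

/* strlcpy() returns strlen(src) and must scan all of src:
 * l == 8, buf == "sis" (truncated, still NUL-terminated). */
size_t l = strlcpy(buf, "sisfb300", sizeof(buf));

/* strscpy() returns the copied length, or -E2BIG on truncation:
 * s == -E2BIG here, buf == "sis" as before. */
ssize_t s = strscpy(buf, "sisfb300", sizeof(buf));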
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 6a52eba64559..fce6cfbadfd6 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1719,7 +1719,7 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
enable = 0;
}
- strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id));
+ strscpy(fb->fix.id, fbname, sizeof(fb->fix.id));
memcpy(&par->ops,
(head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 5c765655d000..52e4ed9da78c 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -450,7 +450,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
if (ret < 0)
return ret;
- /* Set Set Area Color Mode ON/OFF & Low Power Display Mode */
+ /* Set Area Color Mode ON/OFF & Low Power Display Mode */
if (par->area_color_enable || par->low_power) {
u32 mode;
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 27d4b0ace2d6..cd4d640f9477 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1382,7 +1382,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
sst_get_memsize(info, &fix->smem_len);
- strlcpy(fix->id, spec->name, sizeof(fix->id));
+ strscpy(fix->id, spec->name, sizeof(fix->id));
printk(KERN_INFO "%s (revision %d) with %s dac\n",
fix->id, par->revision, par->dac_sw.name);
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 15b079505a00..490bd9a14763 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -80,7 +80,7 @@ static int gfb_set_fbinfo(struct gfb_info *gp)
info->pseudo_palette = gp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
+ strscpy(info->fix.id, "gfb", sizeof(info->fix.id));
info->fix.smem_start = gp->fb_base_phys;
info->fix.smem_len = gp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 1d3bacd9d5ac..1279b02234f8 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -84,7 +84,7 @@ static int s3d_set_fbinfo(struct s3d_info *sp)
info->pseudo_palette = sp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "s3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "s3d", sizeof(info->fix.id));
info->fix.smem_start = sp->fb_base_phys;
info->fix.smem_len = sp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index 9daf17b11106..f7b463633ba0 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -207,7 +207,7 @@ static int e3d_set_fbinfo(struct e3d_info *ep)
info->pseudo_palette = ep->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "e3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "e3d", sizeof(info->fix.id));
info->fix.smem_start = ep->fb_base_phys;
info->fix.smem_len = ep->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 1638a40fed22..01d87f53324d 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -333,7 +333,7 @@ tcx_init_fix(struct fb_info *info, int linebytes)
else
tcx_name = "TCX24";
- strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tcx_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 67e37a62b07c..8a8122f8bfeb 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1264,7 +1264,7 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = I2C_CLASS_DDC;
chan->adapter.algo_data = &chan->algo;
@@ -1293,7 +1293,7 @@ static int tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = dev;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index ae0cf5540636..1fff5fd7ab51 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -1344,7 +1344,7 @@ tgafb_init_fix(struct fb_info *info)
memory_size = 16777216;
}
- strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 319131bd72cf..cda095420ee8 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -270,7 +270,7 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
{
struct tridentfb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index c492a57531c6..3ff746e3f24a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -360,7 +360,7 @@ static void vm_synchronize_cbs(struct virtio_device *vdev)
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
- const char *name, u32 size, bool ctx)
+ const char *name, bool ctx)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
struct virtio_mmio_vq_info *info;
@@ -395,11 +395,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
- if (!size || size > num)
- size = num;
-
/* Create the vring */
- vq = vring_create_virtqueue(index, size, VIRTIO_MMIO_VRING_ALIGN, vdev,
+ vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
true, true, ctx, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
@@ -477,7 +474,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
@@ -503,7 +499,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- sizes ? sizes[i] : 0,
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
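With the sizes[] argument removed, virtio-mmio once again sizes every ring from the device-advertised maximum (num above), and find_vqs() callers drop back to the pre-existing shape. A sketch of a driver-side call under the restored signature, with demo_init_vqs() and the rx_done()/tx_done() callbacks as hypothetical names:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void rx_done(struct virtqueue *vq) { /* consume used buffers */ }
static void tx_done(struct virtqueue *vq) { /* reclaim sent buffers */ }

static int demo_init_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
	vq_callback_t *callbacks[] = { rx_done, tx_done };
	static const char * const names[] = { "rx", "tx" };

	/* No per-queue size array; the trailing NULL is the optional
	 * struct irq_affinity hint. */
	return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}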
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 00ad476a815d..ad258a9d3b9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -174,7 +174,6 @@ error:
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec)
{
@@ -187,7 +186,7 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
if (!info)
return ERR_PTR(-ENOMEM);
- vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, size, ctx,
+ vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
msix_vec);
if (IS_ERR(vq))
goto out_info;
@@ -284,7 +283,7 @@ void vp_del_vqs(struct virtio_device *vdev)
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], bool per_vq_vectors,
+ const char * const names[], bool per_vq_vectors,
const bool *ctx,
struct irq_affinity *desc)
{
@@ -327,8 +326,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
else
msix_vec = VP_MSIX_VQ_VECTOR;
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- sizes ? sizes[i] : 0,
- ctx ? ctx[i] : false, msix_vec);
+ ctx ? ctx[i] : false,
+ msix_vec);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
@@ -358,7 +357,7 @@ error_find:
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], const bool *ctx)
+ const char * const names[], const bool *ctx)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i, err, queue_idx = 0;
@@ -380,7 +379,6 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
continue;
}
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- sizes ? sizes[i] : 0,
ctx ? ctx[i] : false,
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
@@ -398,21 +396,21 @@ out_del_vqs:
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], const bool *ctx,
+ const char * const names[], const bool *ctx,
struct irq_affinity *desc)
{
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, true, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, false, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
if (!err)
return 0;
/* Finally fall back to regular interrupts. */
- return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, sizes, ctx);
+ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
const char *vp_bus_name(struct virtio_device *vdev)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index c0448378b698..23112d84218f 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -80,7 +80,6 @@ struct virtio_pci_device {
unsigned int idx,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec);
void (*del_vq)(struct virtio_pci_vq_info *info);
@@ -111,7 +110,7 @@ void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], const bool *ctx,
+ const char * const names[], const bool *ctx,
struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index d75e5c4e637f..2257f1b3d8ae 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,7 +112,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec)
{
@@ -126,13 +125,10 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || vp_legacy_get_queue_enable(&vp_dev->ldev, index))
return ERR_PTR(-ENOENT);
- if (!size || size > num)
- size = num;
-
info->msix_vector = msix_vec;
/* create the vring */
- vq = vring_create_virtqueue(index, size,
+ vq = vring_create_virtqueue(index, num,
VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
true, false, ctx,
vp_notify, callback, name);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f7965c5dd36b..c3b9f2761849 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,7 +293,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec)
{
@@ -311,18 +310,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
- if (!size || size > num)
- size = num;
-
- if (size & (size - 1)) {
- dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", size);
+ if (num & (num - 1)) {
+ dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
return ERR_PTR(-EINVAL);
}
info->msix_vector = msix_vec;
/* create the vring */
- vq = vring_create_virtqueue(index, size,
+ vq = vring_create_virtqueue(index, num,
SMP_CACHE_BYTES, &vp_dev->vdev,
true, true, ctx,
vp_notify, callback, name);
@@ -351,15 +347,12 @@ err:
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char * const names[],
- u32 sizes[],
- const bool *ctx,
+ const char * const names[], const bool *ctx,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
- int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, sizes, ctx,
- desc);
+ int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
if (rc)
return rc;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index d66c8e6d0ef3..4620e9d79dde 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2426,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
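vring_interrupt() is what the transports install as the low-level IRQ handler; it returns IRQ_NONE when the ring shows no new used buffers (keeping shared-line and spurious-interrupt accounting honest) and IRQ_HANDLED after invoking the virtqueue's callback. A sketch of the per-queue MSI-X wiring, roughly what virtio-pci does, with irq and vq assumed to come from the transport's setup path:

#include <linux/interrupt.h>
#include <linux/virtio_ring.h>

static int demo_wire_vq_irq(int irq, struct virtqueue *vq)
{
	/* One vector per queue: the virtqueue itself is the dev_id,
	 * so vring_interrupt() gets it back as _vq. */
	return request_irq(irq, vring_interrupt, 0, "demo-vq", vq);
}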
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 9bc4d110b800..9670cc79371d 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -131,7 +131,7 @@ static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
- const char *name, u32 size, bool ctx)
+ const char *name, bool ctx)
{
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
@@ -168,17 +168,14 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
- if (!size || size > max_num)
- size = max_num;
-
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdpa);
- may_reduce_num = (size == min_num) ? false : true;
+ may_reduce_num = (max_num == min_num) ? false : true;
/* Create the vring */
align = ops->get_vq_align(vdpa);
- vq = vring_create_virtqueue(index, size, align, vdev,
+ vq = vring_create_virtqueue(index, max_num, align, vdev,
true, may_reduce_num, ctx,
virtio_vdpa_notify, callback, name);
if (!vq) {
@@ -272,7 +269,6 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
@@ -288,9 +284,9 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
continue;
}
- vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, callbacks[i],
- names[i], sizes ? sizes[i] : 0,
- ctx ? ctx[i] : false);
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
+ callbacks[i], names[i], ctx ?
+ ctx[i] : false);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto err_setup_vq;
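In the hunk above, may_reduce_num tells vring_create_virtqueue() whether it may fall back to a smaller ring when the contiguous allocation for max_num fails; it is only cleared when the vDPA device pins an exact size (max_num == min_num). A sketch of what that flag permits inside the ring allocator, with alloc() as a hypothetical stand-in for the DMA-coherent queue allocation:

/* Sketch only: halve the entry count until an allocation succeeds,
 * unless the caller demanded the exact size. */
static unsigned int ring_fit(unsigned int num, bool may_reduce_num,
			     void *(*alloc)(unsigned int num))
{
	for (; num; num /= 2) {
		if (alloc(num))
			return num;	/* this size fits */
		if (!may_reduce_num)
			break;		/* exact size or nothing */
	}
	return 0;
}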
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 3369734108af..e88e8f6f0a33 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -581,27 +581,30 @@ static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
- unsigned int i;
+ unsigned int i, off = 0;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < num; ) {
unsigned int requested;
int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
- PAGE_SIZE);
+ PAGE_SIZE) - off;
if (requested > nr_pages)
return -ENOSPC;
page_count = pin_user_pages_fast(
- (unsigned long) kbufs[i].uptr,
+ (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
requested, FOLL_WRITE, pages);
- if (page_count < 0)
- return page_count;
+ if (page_count <= 0)
+ return page_count ? : -EFAULT;
*pinned += page_count;
nr_pages -= page_count;
pages += page_count;
+
+ off = (requested == page_count) ? 0 : off + page_count;
+ i += !off;
}
return 0;
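The reworked lock_pages() above tolerates partial pins: off counts the pages of the current buffer already pinned, the next pin_user_pages_fast() call resumes at uptr + off * PAGE_SIZE with a correspondingly smaller request, and i only advances once the buffer completes (off wraps back to 0). A worked pass over one 5-page buffer where the first call pins short:

/*
 *   pass 1: off = 0, requested = 5, page_count = 3
 *           -> off = 3, i unchanged (buffer not finished)
 *   pass 2: off = 3, requested = 5 - 3 = 2,
 *           start = uptr + 3 * PAGE_SIZE, page_count = 2
 *           -> requested == page_count, so off = 0 and i++
 */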
@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
}
rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
- if (rc < 0) {
- nr_pages = pinned;
+ if (rc < 0)
goto out;
- }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
xen_preemptible_hcall_end();
out:
- unlock_pages(pages, nr_pages);
+ unlock_pages(pages, pinned);
kfree(xbufs);
kfree(pages);
kfree(kbufs);
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7a0c93acc2c5..d3dcda344989 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1121,7 +1121,7 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
"%s: writing %s", __func__, state);
return;
}
- strlcpy(phy, val, VSCSI_NAMELEN);
+ strscpy(phy, val, VSCSI_NAMELEN);
kfree(val);
/* virtual SCSI device */
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 07b010a68fcf..f44d5a64351e 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -40,7 +40,7 @@ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
return -EINVAL;
}
- strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+ strscpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
pr_warn("bus_id %s no slash\n", bus_id);
return -EINVAL;