Diffstat (limited to 'drivers/pci')
62 files changed, 24248 insertions, 0 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig new file mode 100644 index 00000000000..22a56f4ca38 --- /dev/null +++ b/drivers/pci/Kconfig @@ -0,0 +1,434 @@ +menuconfig PCI + bool "PCI support" + depends on DM + default y if PPC + help + Enable support for PCI (Peripheral Interconnect Bus), a type of bus + used on some devices to allow the CPU to communicate with its + peripherals. + + This subsystem requires driver model. + +if PCI + +config DM_PCI_COMPAT + bool "Enable compatible functions for PCI" + help + Enable compatibility functions for PCI so that old code can be used + with CONFIG_PCI enabled. This should be used as an interim + measure when porting a board to use driver model for PCI. Once the + board is fully supported, this option should be disabled. + +config SYS_PCI_64BIT + bool "Enable 64-bit PCI resources" + default y if PPC + help + Enable 64-bit PCI resource access. + +config PCI_AARDVARK + bool "Enable Aardvark PCIe driver" + depends on DM_GPIO + depends on ARMADA_3700 + help + Say Y here if you want to enable PCIe controller support on + Armada37x0 SoCs. The PCIe controller on Armada37x0 is based on + Aardvark hardware. + +config PCI_PNP + bool "Enable Plug & Play support for PCI" + default y + help + Enable PCI memory and I/O space resource allocation and assignment. + +config SPL_PCI_PNP + bool "Enable Plug & Play support for PCI in SPL" + depends on SPL_PCI + help + Enable PCI memory and I/O space resource allocation and assignment. + + This is required to auto configure the enumerated devices. + + This is normally not done in SPL, but can be enabled if devices must + be set up in the SPL phase. Often it is enough to manually configure + one device, so this option can be disabled. + +config PCI_REGION_MULTI_ENTRY + bool "Enable Multiple entries of region type MEMORY in ranges for PCI" + help + Enable PCI memory regions to be of multiple entry. Multiple entry + here refers to allow more than one count of address ranges for MEMORY + region type. This helps to add support for SoC's like OcteonTX/TX2 + where every peripheral is on the PCI bus. + +config PCI_CONFIG_HOST_BRIDGE + bool "Configure PCI host bridges" + default y if X86 + +config PCI_MAP_SYSTEM_MEMORY + bool "Map local system memory from a virtual base address" + depends on MIPS + help + Say Y if base address of system memory is being used as a virtual address + instead of a physical address (e.g. on MIPS). The PCI core will then remap + the virtual memory base address to a physical address when adding the PCI + region of type PCI_REGION_SYS_MEMORY. + This should only be required on MIPS where CFG_SYS_SDRAM_BASE is still + being used as virtual address. + +config PCI_SRIOV + bool "Enable Single Root I/O Virtualization support for PCI" + help + Say Y here if you want to enable PCI Single Root I/O Virtualization + capability support. This helps to enumerate Virtual Function devices + if available on a PCI Physical Function device and probe for + applicable drivers. + +config PCI_ENHANCED_ALLOCATION + bool "Enable support for Enhanced Allocation of resources" + default y + help + Enable support for Enhanced Allocation which can be used by supported + devices in place of traditional BARS for allocation of resources. + +config PCI_ARID + bool "Enable Alternate Routing-ID support for PCI" + help + Say Y here if you want to enable Alternate Routing-ID capability + support on PCI devices. This helps to skip some devices in BDF + scan that are not present. 
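The porting note under DM_PCI_COMPAT above assumes consumers go through U-Boot's driver-model config-access helpers (dm_pci_read_config*/dm_pci_write_config*) instead of the legacy PCI API. The following is only a rough sketch of that usage, not code from this diff; the helper name enable_pci_device and its enable policy are illustrative assumptions:

#include <dm.h>
#include <errno.h>
#include <pci.h>

/* Sketch: probe-time helper for a device already bound under UCLASS_PCI */
static int enable_pci_device(struct udevice *dev)
{
        u32 id;
        u16 cmd;
        int ret;

        /* 32-bit read at offset 0: vendor ID in bits 15:0, device ID in 31:16 */
        ret = dm_pci_read_config32(dev, PCI_VENDOR_ID, &id);
        if (ret)
                return ret;
        if (id == 0xffffffff)
                return -ENODEV; /* nothing answered at this BDF */

        /* read-modify-write the command register to enable MEM and bus mastering */
        ret = dm_pci_read_config16(dev, PCI_COMMAND, &cmd);
        if (ret)
                return ret;
        cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;

        return dm_pci_write_config16(dev, PCI_COMMAND, cmd);
}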
+ +config PCI_SCAN_SHOW + bool "Show PCI devices during startup" + depends on PCIE_IMX + +config PCIE_ECAM_GENERIC + bool "Generic ECAM-based PCI host controller support" + help + Say Y here if you want to enable support for generic ECAM-based + PCIe host controllers, such as the one emulated by QEMU. + +config PCIE_ECAM_SYNQUACER + bool "SynQuacer ECAM-based PCI host controller support" + select PCI_INIT_R + select PCI_REGION_MULTI_ENTRY + help + Say Y here if you want to enable support for Socionext + SynQuacer SoC's ECAM-based PCIe host controllers. + Note that this must be configured when boot because Linux driver + expects the PCIe RC has been configured in the bootloader. + +config PCIE_APPLE + bool "Enable Apple PCIe driver" + depends on ARCH_APPLE + imply PCI_INIT_R + select SYS_PCI_64BIT + default y + help + Say Y here if you want to enable PCIe controller support on + Apple SoCs. + +config PCI_FTPCI100 + bool "Enable Faraday FTPCI100 PCI Bridge Controller driver" + help + Say Y here if you want to enable Faraday FTPCI100 PCI. + FTPCI100 IP is used in SoC chip designs. + +config PCI_GT64120 + bool "GT64120 PCI support" + depends on MIPS + +config PCI_PHYTIUM + bool "Phytium PCIe support" + help + Say Y here if you want to enable PCIe controller support on + Phytium SoCs. + +config PCIE_DW_MVEBU + bool "Enable Armada-8K PCIe driver (DesignWare core)" + depends on ARMADA_8K + help + Say Y here if you want to enable PCIe controller support on + Armada-8K SoCs. The PCIe controller on Armada-8K is based on + DesignWare hardware. + +config PCIE_DW_SIFIVE + bool "Enable SiFive FU740 PCIe" + depends on CLK_SIFIVE_PRCI + depends on RESET_SIFIVE + depends on SIFIVE_GPIO + select PCIE_DW_COMMON + help + Say Y here if you want to enable PCIe controller support on + FU740. + +config SYS_FSL_PCI_VER_3_X + bool + +config PCIE_FSL + bool "FSL PowerPC PCIe support" + select SYS_FSL_PCI_VER_3_X if ARCH_T2080 || ARCH_T4240 + help + Say Y here if you want to enable PCIe controller support on FSL + PowerPC MPC85xx, MPC86xx, B series, P series and T series SoCs. + This driver does not support SRIO_PCIE_BOOT feature. + +config PCI_MPC85XX + bool "MPC85XX PowerPC PCI support" + help + Say Y here if you want to enable PCI controller support on FSL + PowerPC MPC85xx SoC. + +config PCI_MSC01 + bool "MSC01 PCI support" + depends on TARGET_MALTA + +config PCI_RCAR_GEN2 + bool "Renesas RCar Gen2 PCIe driver" + depends on RCAR_32 + help + Say Y here if you want to enable PCIe controller support on + Renesas RCar Gen2 SoCs. The PCIe controller on RCar Gen2 is + also used to access EHCI USB controller on the SoC. + +config PCI_RCAR_GEN3 + bool "Renesas RCar Gen3 PCIe driver" + depends on RCAR_GEN3 + help + Say Y here if you want to enable PCIe controller support on + Renesas RCar Gen3 SoCs. + +config PCI_SANDBOX + bool "Sandbox PCI support" + depends on SANDBOX + help + Support PCI on sandbox, as an emulated bus. This permits testing of + PCI feature such as bus scanning, device configuration and device + access. The available (emulated) devices are defined statically in + the device tree but the normal PCI scan technique is used to find + then. + +config SH7751_PCI + bool "SH7751 PCI controller support" + depends on SH + help + SuperH PCI Bridge Configuration + +config PCI_TEGRA + bool "Tegra PCI support" + depends on ARCH_TEGRA + depends on (TEGRA186 && POWER_DOMAIN) || (!TEGRA186) + help + Enable support for the PCIe controller found on some generations of + Tegra. 
Tegra20 has 2 root ports with a total of 4 lanes, Tegra30 has + 3 root ports with a total of 6 lanes and Tegra124 has 2 root ports + with a total of 5 lanes. Some boards require this for Ethernet + support to work (e.g. beaver, jetson-tk1). + +config PCI_OCTEONTX + bool "OcteonTX PCI support" + depends on (ARCH_OCTEONTX || ARCH_OCTEONTX2) + help + Enable support for the OcteonTX/TX2 SoC family ECAM/PEM controllers. + These controllers provide PCI configuration access to all on-board + peripherals so it should only be disabled for testing purposes + +config PCIE_OCTEON + bool "MIPS Octeon PCIe support" + depends on ARCH_OCTEON + help + Enable support for the MIPS Octeon SoC family PCIe controllers. + +config PCI_XILINX + bool "Xilinx AXI Bridge for PCI Express" + help + Enable support for the Xilinx AXI bridge for PCI express, an IP block + which can be used on some generations of Xilinx FPGAs. + +config PCIE_LAYERSCAPE + bool + +config PCIE_LAYERSCAPE_RC + bool "Layerscape PCIe Root Complex mode support" + select PCIE_LAYERSCAPE + help + Enable Layerscape PCIe Root Complex mode driver support. The Layerscape + SoC may have one or several PCIe controllers. Each controller can be + configured to Root Complex mode by clearing the corresponding bit of + RCW[HOST_AGT_PEX]. + +config PCI_IOMMU_EXTRA_MAPPINGS + bool "Support for specifying extra IOMMU mappings for PCI" + depends on PCIE_LAYERSCAPE_RC + help + Enable support for specifying extra IOMMU mappings for PCI + controllers through a special env var called "pci_iommu_extra" or + through a device tree property named "pci-iommu-extra" placed in + the node describing the PCI controller. + The intent is to cover SR-IOV scenarios which need mappings for VFs + and PCI hot-plug scenarios. More documentation can be found under: + arch/arm/cpu/armv8/fsl-layerscape/doc/README.pci_iommu_extra + +config PCIE_LAYERSCAPE_EP + bool "Layerscape PCIe Endpoint mode support" + select PCIE_LAYERSCAPE + select PCI_ENDPOINT + help + Enable Layerscape PCIe Endpoint mode driver support. The Layerscape + SoC may have one or several PCIe controllers. Each controller can be + configured to Endpoint mode by setting the corresponding bit of + RCW[HOST_AGT_PEX]. + +config PCIE_LAYERSCAPE_GEN4 + bool "Layerscape Gen4 PCIe support" + help + Support PCIe Gen4 on NXP Layerscape SoCs, which may have one or + several PCIe controllers. The PCIe controller can work in RC or + EP mode according to RCW[HOST_AGT_PEX] setting. + +config FSL_PCIE_COMPAT + string "PCIe compatible of Kernel DT" + depends on PCIE_LAYERSCAPE_RC || PCIE_LAYERSCAPE_GEN4 + default "fsl,ls1012a-pcie" if ARCH_LS1012A + default "fsl,ls1028a-pcie" if ARCH_LS1028A + default "fsl,ls1043a-pcie" if ARCH_LS1043A + default "fsl,ls1046a-pcie" if ARCH_LS1046A + default "fsl,ls2080a-pcie" if ARCH_LS2080A + default "fsl,ls1088a-pcie" if ARCH_LS1088A + default "fsl,ls2088a-pcie" if ARCH_LX2160A || ARCH_LX2162A + default "fsl,ls1021a-pcie" if ARCH_LS1021A + help + This compatible is used to find pci controller node in Kernel DT + to complete fixup. + +config FSL_PCIE_EP_COMPAT + string "PCIe EP compatible of Kernel DT" + depends on PCIE_LAYERSCAPE_RC || PCIE_LAYERSCAPE_GEN4 + default "fsl,ls-pcie-ep" + help + This compatible is used to find pci controller ep node in Kernel DT + to complete fixup. 
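As the two entries above state, FSL_PCIE_COMPAT and FSL_PCIE_EP_COMPAT only name the compatible string used to find the PCIe controller nodes in the kernel device tree so the fixups can be completed. A minimal sketch of that lookup, assuming libfdt's fdt_node_offset_by_compatible(); the helper name and the fixups hinted at in the comment are illustrative, not taken from this diff:

#include <linux/libfdt.h>

/* Sketch: visit every kernel-DT node whose compatible matches the Kconfig string */
static void fixup_pcie_nodes(void *blob, const char *compat)
{
        int off = -1;

        while ((off = fdt_node_offset_by_compatible(blob, off, compat)) >= 0) {
                /* apply per-controller fixups here (e.g. status, iommu mappings) */
        }
}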
+ +config PCIE_IMX + bool "i.MX PCIe support" + depends on ARCH_MX6 + +config PCIE_INTEL_FPGA + bool "Intel FPGA PCIe support" + help + Say Y here if you want to enable PCIe controller support on Intel + FPGA, example Stratix 10. + +config PCIE_IPROC + bool "Iproc PCIe support" + help + Broadcom iProc PCIe controller driver. + Say Y here if you want to enable Broadcom iProc PCIe controller, + +config PCI_MVEBU + bool "Enable Kirkwood / Armada 370/XP/375/38x PCIe driver" + depends on (ARCH_KIRKWOOD || ARCH_MVEBU) + select MISC + select DM_RESET + select DM_GPIO + help + Say Y here if you want to enable PCIe controller support on + Kirkwood and Armada 370/XP/375/38x SoCs. + +config PCIE_DW_COMMON + bool + +config PCI_KEYSTONE + bool "TI Keystone PCIe controller" + select PCIE_DW_COMMON + help + Say Y here if you want to enable PCI controller support on AM654 SoC. + +config PCIE_MEDIATEK + bool "MediaTek PCIe Gen2 controller" + depends on ARCH_MEDIATEK + help + Say Y here if you want to enable Gen2 PCIe controller, + which could be found on MT7623 SoC family. + +config PCIE_MEDIATEK_GEN3 + bool "MediaTek PCIe Gen3 controller" + depends on ARCH_MEDIATEK + help + Say Y here if you want to enable Gen3 PCIe controller, + which could be found on the Mediatek Filogic SoC family. + +config PCIE_DW_MESON + bool "Amlogic Meson DesignWare based PCIe controller" + depends on ARCH_MESON + select PCIE_DW_COMMON + help + Say Y here if you want to enable DW PCIe controller support on + Amlogic SoCs. + +config PCIE_ROCKCHIP + bool "Enable Rockchip PCIe driver" + depends on ARCH_ROCKCHIP + select PHY_ROCKCHIP_PCIE + default y if ROCKCHIP_RK3399 + help + Say Y here if you want to enable PCIe controller support on + Rockchip SoCs. + +config PCIE_DW_ROCKCHIP + bool "Rockchip DesignWare based PCIe controller" + depends on ARCH_ROCKCHIP + select PCIE_DW_COMMON + select PHY_ROCKCHIP_SNPS_PCIE3 + help + Say Y here if you want to enable DW PCIe controller support on + Rockchip SoCs. + +config PCI_BRCMSTB + bool "Broadcom STB PCIe controller" + depends on ARCH_BCM283X + help + Say Y here if you want to enable support for PCIe controller + on Broadcom set-top-box (STB) SoCs. + This driver currently supports only BCM2711 SoC and RC mode + of the controller. + +config PCIE_UNIPHIER + bool "Socionext UniPhier PCIe driver" + depends on ARCH_UNIPHIER + select PHY_UNIPHIER_PCIE + help + Say Y here if you want to enable PCIe controller support on + UniPhier SoCs. + +config PCIE_XILINX_NWL + bool "Xilinx NWL PCIe controller" + depends on ARCH_ZYNQMP + help + Say 'Y' here if you want support for Xilinx / AMD NWL PCIe + controller as Root Port. + +config PCIE_PLDA_COMMON + bool + +config PCIE_STARFIVE_JH7110 + bool "Enable Starfive JH7110 PCIe driver" + select PCIE_PLDA_COMMON + imply STARFIVE_JH7110 + imply CLK_JH7110 + imply RESET_JH7110 + help + Say Y here if you want to enable PLDA XpressRich PCIe controller + support on StarFive JH7110 SoC. + +config PCIE_DW_IMX + bool "i.MX DW PCIe controller support" + depends on ARCH_IMX8M + select PCIE_DW_COMMON + select DM_REGULATOR + select REGMAP + select SYSCON + help + Say Y here if you want to enable DW PCIe controller support on + iMX SoCs. + +endif diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile new file mode 100644 index 00000000000..5b2d2969802 --- /dev/null +++ b/drivers/pci/Makefile @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# (C) Copyright 2000-2007 +# Wolfgang Denk, DENX Software Engineering, wd@denx.de. 
+ +obj-$(CONFIG_VIDEO) += pci_rom.o +obj-$(CONFIG_PCI) += pci-uclass.o pci_auto.o +obj-$(CONFIG_DM_PCI_COMPAT) += pci_compat.o +obj-$(CONFIG_PCI_SANDBOX) += pci_sandbox.o +obj-$(CONFIG_SANDBOX) += pci-emul-uclass.o +obj-$(CONFIG_X86) += pci_x86.o pci_rom.o +obj-$(CONFIG_PCI) += pci_auto_common.o pci_common.o + +obj-$(CONFIG_PCIE_ECAM_GENERIC) += pcie_ecam_generic.o +obj-$(CONFIG_PCIE_ECAM_SYNQUACER) += pcie_ecam_synquacer.o +obj-$(CONFIG_PCIE_APPLE) += pcie_apple.o +obj-$(CONFIG_PCI_FTPCI100) += pci_ftpci100.o +obj-$(CONFIG_PCI_GT64120) += pci_gt64120.o +obj-$(CONFIG_PCI_MPC85XX) += pci_mpc85xx.o +obj-$(CONFIG_PCI_MSC01) += pci_msc01.o +obj-$(CONFIG_PCIE_IMX) += pcie_imx.o +obj-$(CONFIG_PCI_MVEBU) += pci_mvebu.o +obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o +obj-$(CONFIG_PCI_RCAR_GEN3) += pci-rcar-gen3.o +obj-$(CONFIG_SH7751_PCI) +=pci_sh7751.o +obj-$(CONFIG_PCI_TEGRA) += pci_tegra.o +obj-$(CONFIG_PCIE_IPROC) += pcie_iproc.o +obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o +obj-$(CONFIG_PCIE_DW_MVEBU) += pcie_dw_mvebu.o +obj-$(CONFIG_PCIE_FSL) += pcie_fsl.o pcie_fsl_fixup.o +obj-$(CONFIG_PCIE_LAYERSCAPE) += pcie_layerscape.o +obj-$(CONFIG_PCIE_LAYERSCAPE_RC) += pcie_layerscape_rc.o \ + pcie_layerscape_fixup.o \ + pcie_layerscape_fixup_common.o +obj-$(CONFIG_PCIE_LAYERSCAPE_EP) += pcie_layerscape_ep.o +obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie_layerscape_gen4.o \ + pcie_layerscape_gen4_fixup.o \ + pcie_layerscape_fixup_common.o +obj-$(CONFIG_PCI_XILINX) += pcie_xilinx.o +obj-$(CONFIG_PCI_PHYTIUM) += pcie_phytium.o +obj-$(CONFIG_PCIE_INTEL_FPGA) += pcie_intel_fpga.o +obj-$(CONFIG_PCIE_DW_COMMON) += pcie_dw_common.o +obj-$(CONFIG_PCI_KEYSTONE) += pcie_dw_ti.o +obj-$(CONFIG_PCIE_MEDIATEK) += pcie_mediatek.o +obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie_mediatek_gen3.o +obj-$(CONFIG_PCIE_ROCKCHIP) += pcie_rockchip.o +obj-$(CONFIG_PCIE_DW_ROCKCHIP) += pcie_dw_rockchip.o +obj-$(CONFIG_PCIE_DW_MESON) += pcie_dw_meson.o +obj-$(CONFIG_PCI_BRCMSTB) += pcie_brcmstb.o +obj-$(CONFIG_PCI_OCTEONTX) += pci_octeontx.o +obj-$(CONFIG_PCIE_OCTEON) += pcie_octeon.o +obj-$(CONFIG_PCIE_DW_SIFIVE) += pcie_dw_sifive.o +obj-$(CONFIG_PCIE_UNIPHIER) += pcie_uniphier.o +obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o +obj-$(CONFIG_PCIE_PLDA_COMMON) += pcie_plda_common.o +obj-$(CONFIG_PCIE_STARFIVE_JH7110) += pcie_starfive_jh7110.o +obj-$(CONFIG_PCIE_DW_IMX) += pcie_dw_imx.o diff --git a/drivers/pci/pci-aardvark.c b/drivers/pci/pci-aardvark.c new file mode 100644 index 00000000000..f5db4bdb760 --- /dev/null +++ b/drivers/pci/pci-aardvark.c @@ -0,0 +1,1012 @@ +/* + * *************************************************************************** + * Copyright (C) 2015 Marvell International Ltd. + * *************************************************************************** + * This program is free software: you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation, either version 2 of the License, or any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ * *************************************************************************** + */ +/* pcie_advk.c + * + * Ported from Linux driver - driver/pci/host/pci-aardvark.c + * + * Author: Victor Gu <xigu@marvell.com> + * Hezi Shahmoon <hezi.shahmoon@marvell.com> + * Pali Rohár <pali@kernel.org> + * + */ + +#include <dm.h> +#include <pci.h> +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <dm/device_compat.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/ioport.h> + +/* PCIe Root Port register offsets */ +#define ADVK_ROOT_PORT_PCI_CFG_OFF 0x0 +#define ADVK_ROOT_PORT_PCI_EXP_OFF 0xc0 +#define ADVK_ROOT_PORT_PCI_ERR_OFF 0x100 + +/* PIO registers */ +#define ADVK_PIO_BASE_ADDR 0x4000 +#define ADVK_PIO_CTRL (ADVK_PIO_BASE_ADDR + 0x0) +#define ADVK_PIO_CTRL_TYPE_MASK GENMASK(3, 0) +#define ADVK_PIO_CTRL_TYPE_SHIFT 0 +#define ADVK_PIO_CTRL_TYPE_RD_TYPE0 0x8 +#define ADVK_PIO_CTRL_TYPE_RD_TYPE1 0x9 +#define ADVK_PIO_CTRL_TYPE_WR_TYPE0 0xa +#define ADVK_PIO_CTRL_TYPE_WR_TYPE1 0xb +#define ADVK_PIO_CTRL_ADDR_WIN_DISABLE BIT(24) +#define ADVK_PIO_STAT (ADVK_PIO_BASE_ADDR + 0x4) +#define ADVK_PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) +#define ADVK_PIO_COMPLETION_STATUS_SHIFT 7 +#define ADVK_PIO_COMPLETION_STATUS_OK 0 +#define ADVK_PIO_COMPLETION_STATUS_UR 1 +#define ADVK_PIO_COMPLETION_STATUS_CRS 2 +#define ADVK_PIO_COMPLETION_STATUS_CA 4 +#define ADVK_PIO_NON_POSTED_REQ BIT(10) +#define ADVK_PIO_ERR_STATUS BIT(11) +#define ADVK_PIO_ADDR_LS (ADVK_PIO_BASE_ADDR + 0x8) +#define ADVK_PIO_ADDR_MS (ADVK_PIO_BASE_ADDR + 0xc) +#define ADVK_PIO_WR_DATA (ADVK_PIO_BASE_ADDR + 0x10) +#define ADVK_PIO_WR_DATA_STRB (ADVK_PIO_BASE_ADDR + 0x14) +#define ADVK_PIO_RD_DATA (ADVK_PIO_BASE_ADDR + 0x18) +#define ADVK_PIO_START (ADVK_PIO_BASE_ADDR + 0x1c) +#define ADVK_PIO_ISR (ADVK_PIO_BASE_ADDR + 0x20) + +/* Global Control registers */ +#define ADVK_GLOBAL_CTRL_BASE_ADDR 0x4800 +#define ADVK_GLOBAL_CTRL0 (ADVK_GLOBAL_CTRL_BASE_ADDR + 0x0) +#define ADVK_GLOBAL_CTRL0_SPEED_GEN_MASK GENMASK(1, 0) +#define ADVK_GLOBAL_CTRL0_SPEED_GEN_SHIFT 0 +#define ADVK_GLOBAL_CTRL0_SPEED_GEN_1 0 +#define ADVK_GLOBAL_CTRL0_SPEED_GEN_2 1 +#define ADVK_GLOBAL_CTRL0_SPEED_GEN_3 2 +#define ADVK_GLOBAL_CTRL0_IS_RC BIT(2) +#define ADVK_GLOBAL_CTRL0_LANE_COUNT_MASK GENMASK(4, 3) +#define ADVK_GLOBAL_CTRL0_LANE_COUNT_SHIFT 3 +#define ADVK_GLOBAL_CTRL0_LANE_COUNT_1 0 +#define ADVK_GLOBAL_CTRL0_LANE_COUNT_2 1 +#define ADVK_GLOBAL_CTRL0_LANE_COUNT_4 2 +#define ADVK_GLOBAL_CTRL0_LANE_COUNT_8 3 +#define ADVK_GLOBAL_CTRL0_LINK_TRAINING_EN BIT(6) +#define ADVK_GLOBAL_CTRL2 (ADVK_GLOBAL_CTRL_BASE_ADDR + 0x8) +#define ADVK_GLOBAL_CTRL2_STRICT_ORDER_EN BIT(5) +#define ADVK_GLOBAL_CTRL2_ADDRWIN_MAP_EN BIT(6) + +/* PCIe window configuration registers */ +#define ADVK_OB_WIN_BASE_ADDR 0x4c00 +#define ADVK_OB_WIN_BLOCK_SIZE 0x20 +#define ADVK_OB_WIN_COUNT 8 +#define ADVK_OB_WIN_REG_ADDR(win, offset) (ADVK_OB_WIN_BASE_ADDR + ADVK_OB_WIN_BLOCK_SIZE * (win) + (offset)) +#define ADVK_OB_WIN_MATCH_LS(win) ADVK_OB_WIN_REG_ADDR(win, 0x00) +#define ADVK_OB_WIN_ENABLE BIT(0) +#define ADVK_OB_WIN_MATCH_MS(win) ADVK_OB_WIN_REG_ADDR(win, 0x04) +#define ADVK_OB_WIN_REMAP_LS(win) ADVK_OB_WIN_REG_ADDR(win, 0x08) +#define ADVK_OB_WIN_REMAP_MS(win) ADVK_OB_WIN_REG_ADDR(win, 0x0c) +#define ADVK_OB_WIN_MASK_LS(win) ADVK_OB_WIN_REG_ADDR(win, 0x10) +#define ADVK_OB_WIN_MASK_MS(win) ADVK_OB_WIN_REG_ADDR(win, 0x14) +#define ADVK_OB_WIN_ACTIONS(win) ADVK_OB_WIN_REG_ADDR(win, 0x18) +#define ADVK_OB_WIN_DEFAULT_ACTIONS 
(ADVK_OB_WIN_ACTIONS(ADVK_OB_WIN_COUNT-1) + 0x4) +#define ADVK_OB_WIN_FUNC_NUM_MASK GENMASK(31, 24) +#define ADVK_OB_WIN_FUNC_NUM_SHIFT 24 +#define ADVK_OB_WIN_FUNC_NUM_ENABLE BIT(23) +#define ADVK_OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20) +#define ADVK_OB_WIN_BUS_NUM_BITS_SHIFT 20 +#define ADVK_OB_WIN_MSG_CODE_ENABLE BIT(22) +#define ADVK_OB_WIN_MSG_CODE_MASK GENMASK(21, 14) +#define ADVK_OB_WIN_MSG_CODE_SHIFT 14 +#define ADVK_OB_WIN_MSG_PAYLOAD_LEN BIT(12) +#define ADVK_OB_WIN_ATTR_ENABLE BIT(11) +#define ADVK_OB_WIN_ATTR_TC_MASK GENMASK(10, 8) +#define ADVK_OB_WIN_ATTR_TC_SHIFT 8 +#define ADVK_OB_WIN_ATTR_RELAXED BIT(7) +#define ADVK_OB_WIN_ATTR_NOSNOOP BIT(6) +#define ADVK_OB_WIN_ATTR_POISON BIT(5) +#define ADVK_OB_WIN_ATTR_IDO BIT(4) +#define ADVK_OB_WIN_TYPE_MASK GENMASK(3, 0) +#define ADVK_OB_WIN_TYPE_SHIFT 0 +#define ADVK_OB_WIN_TYPE_MEM 0x0 +#define ADVK_OB_WIN_TYPE_IO 0x4 +#define ADVK_OB_WIN_TYPE_CONFIG_TYPE0 0x8 +#define ADVK_OB_WIN_TYPE_CONFIG_TYPE1 0x9 +#define ADVK_OB_WIN_TYPE_MSG 0xc + +/* Local Management Interface registers */ +#define ADVK_LMI_BASE_ADDR 0x6000 +#define ADVK_LMI_PHY_CFG0 (ADVK_LMI_BASE_ADDR + 0x0) +#define ADVK_LMI_PHY_CFG0_LTSSM_MASK GENMASK(29, 24) +#define ADVK_LMI_PHY_CFG0_LTSSM_SHIFT 24 +#define ADVK_LMI_PHY_CFG0_LTSSM_L0 0x10 +#define ADVK_LMI_PHY_CFG0_LTSSM_DISABLED 0x20 +#define ADVK_LMI_VENDOR_ID (ADVK_LMI_BASE_ADDR + 0x44) + +/* Core Control registers */ +#define ADVK_CORE_CTRL_BASE_ADDR 0x18000 +#define ADVK_CORE_CTRL_CONFIG (ADVK_CORE_CTRL_BASE_ADDR + 0x0) +#define ADVK_CORE_CTRL_CONFIG_COMMAND_MODE BIT(0) + +/* PCIe Retries & Timeout definitions */ +#define PIO_MAX_RETRIES 1500 +#define PIO_WAIT_TIMEOUT 1000 +#define LINK_MAX_RETRIES 10 +#define LINK_WAIT_TIMEOUT 100000 + +#define CFG_RD_CRS_VAL 0xFFFF0001 + +/** + * struct pcie_advk - Advk PCIe controller state + * + * @base: The base address of the register space. + * @sec_busno: Bus number for the device behind the PCIe root-port. + * @dev: The pointer to PCI uclass device. + * @reset_gpio: GPIO descriptor for PERST. + * @cfgcache: Buffer for emulation of PCIe Root Port's PCI Bridge registers + * that are not available on Aardvark. + * @cfgcrssve: For CRSSVE emulation. + */ +struct pcie_advk { + void *base; + int sec_busno; + struct udevice *dev; + struct gpio_desc reset_gpio; + u32 cfgcache[(0x3c - 0x10) / 4]; + bool cfgcrssve; +}; + +static inline void advk_writel(struct pcie_advk *pcie, uint val, uint reg) +{ + writel(val, pcie->base + reg); +} + +static inline uint advk_readl(struct pcie_advk *pcie, uint reg) +{ + return readl(pcie->base + reg); +} + +/** + * pcie_advk_link_up() - Check if PCIe link is up or not + * + * @pcie: The PCI device to access + * + * Return true on link up. + * Return false on link down. 
+ */ +static bool pcie_advk_link_up(struct pcie_advk *pcie) +{ + u32 val, ltssm_state; + + val = advk_readl(pcie, ADVK_LMI_PHY_CFG0); + ltssm_state = (val & ADVK_LMI_PHY_CFG0_LTSSM_MASK) >> ADVK_LMI_PHY_CFG0_LTSSM_SHIFT; + return ltssm_state >= ADVK_LMI_PHY_CFG0_LTSSM_L0 && ltssm_state < ADVK_LMI_PHY_CFG0_LTSSM_DISABLED; +} + +/** + * pcie_advk_addr_valid() - Check for valid bus address + * + * @pcie: Pointer to the PCI bus + * @busno: Bus number of PCI device + * @dev: Device number of PCI device + * @func: Function number of PCI device + * @bdf: The PCI device to access + * + * Return: true on valid, false on invalid + */ +static bool pcie_advk_addr_valid(struct pcie_advk *pcie, + int busno, u8 dev, u8 func) +{ + /* On the root bus there is only one PCI Bridge */ + if (busno == 0 && (dev != 0 || func != 0)) + return false; + + /* Access to other buses is possible when link is up */ + if (busno != 0 && !pcie_advk_link_up(pcie)) + return false; + + /* + * In PCI-E only a single device (0) can exist on the secondary bus. + * Beyond the secondary bus, there might be a Switch and anything is + * possible. + */ + if (busno == pcie->sec_busno && dev != 0) + return false; + + return true; +} + +/** + * pcie_advk_wait_pio() - Wait for PIO access to be accomplished + * + * @pcie: The PCI device to access + * + * Wait up to 1.5 seconds for PIO access to be accomplished. + * + * Return positive - retry count if PIO access is accomplished. + * Return negative - error if PIO access is timed out. + */ +static int pcie_advk_wait_pio(struct pcie_advk *pcie) +{ + uint start, isr; + uint count; + + for (count = 1; count <= PIO_MAX_RETRIES; count++) { + start = advk_readl(pcie, ADVK_PIO_START); + isr = advk_readl(pcie, ADVK_PIO_ISR); + if (!start && isr) + return count; + /* + * Do not check the PIO state too frequently, + * 100us delay is appropriate. + */ + udelay(PIO_WAIT_TIMEOUT); + } + + dev_err(pcie->dev, "PIO read/write transfer time out\n"); + return -ETIMEDOUT; +} + +/** + * pcie_advk_check_pio_status() - Validate PIO status and get the read result + * + * @pcie: Pointer to the PCI bus + * @allow_crs: Only for read requests, if CRS response is allowed + * @read_val: Pointer to the read result + * + * Return: 0 on success + */ +static int pcie_advk_check_pio_status(struct pcie_advk *pcie, + bool allow_crs, + uint *read_val) +{ + int ret; + uint reg; + unsigned int status; + char *strcomp_status, *str_posted; + + reg = advk_readl(pcie, ADVK_PIO_STAT); + status = (reg & ADVK_PIO_COMPLETION_STATUS_MASK) >> + ADVK_PIO_COMPLETION_STATUS_SHIFT; + + switch (status) { + case ADVK_PIO_COMPLETION_STATUS_OK: + if (reg & ADVK_PIO_ERR_STATUS) { + strcomp_status = "COMP_ERR"; + ret = -EFAULT; + break; + } + /* Get the read result */ + if (read_val) + *read_val = advk_readl(pcie, ADVK_PIO_RD_DATA); + /* No error */ + strcomp_status = NULL; + ret = 0; + break; + case ADVK_PIO_COMPLETION_STATUS_UR: + strcomp_status = "UR"; + ret = -EOPNOTSUPP; + break; + case ADVK_PIO_COMPLETION_STATUS_CRS: + if (allow_crs && read_val) { + /* For reading, CRS is not an error status. 
*/ + *read_val = CFG_RD_CRS_VAL; + strcomp_status = NULL; + ret = 0; + } else { + strcomp_status = "CRS"; + ret = -EAGAIN; + } + break; + case ADVK_PIO_COMPLETION_STATUS_CA: + strcomp_status = "CA"; + ret = -ECANCELED; + break; + default: + strcomp_status = "Unknown"; + ret = -EINVAL; + break; + } + + if (!strcomp_status) + return ret; + + if (reg & ADVK_PIO_NON_POSTED_REQ) + str_posted = "Non-posted"; + else + str_posted = "Posted"; + + dev_dbg(pcie->dev, "%s PIO Response Status: %s, %#x @ %#x\n", + str_posted, strcomp_status, reg, + advk_readl(pcie, ADVK_PIO_ADDR_LS)); + + return ret; +} + +/** + * pcie_advk_read_config() - Read from configuration space + * + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @valuep: A pointer at which to store the read value + * @size: Indicates the size of access to perform + * + * Read a value of size @size from offset @offset within the configuration + * space of the device identified by the bus, device & function numbers in @bdf + * on the PCI bus @bus. + * + * Return: 0 on success + */ +static int pcie_advk_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct pcie_advk *pcie = dev_get_priv(bus); + int busno = PCI_BUS(bdf) - dev_seq(bus); + int retry_count; + bool allow_crs; + ulong data; + uint reg; + int ret; + + dev_dbg(pcie->dev, "PCIE CFG read: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + + if (!pcie_advk_addr_valid(pcie, busno, PCI_DEV(bdf), PCI_FUNC(bdf))) { + dev_dbg(pcie->dev, "- out of range\n"); + *valuep = pci_get_ff(size); + return 0; + } + + /* + * The configuration space of the PCI Bridge on the root bus (zero) is + * not accessible via PIO transfers like all other PCIe devices. PCI + * Bridge config registers are available directly in Aardvark memory + * space starting at offset zero. The PCI Bridge config space is of + * Type 0, but the BAR registers (including ROM BAR) don't have the same + * meaning as in the PCIe specification. Therefore do not access BAR + * registers and non-common registers (those which have different + * meaning for Type 0 and Type 1 config space) of the PCI Bridge + * and instead read their content from driver virtual cfgcache[]. + */ + if (busno == 0) { + if ((offset >= 0x10 && offset < 0x34) || (offset >= 0x38 && offset < 0x3c)) + data = pcie->cfgcache[(offset - 0x10) / 4]; + else + data = advk_readl(pcie, ADVK_ROOT_PORT_PCI_CFG_OFF + (offset & ~3)); + + if ((offset & ~3) == (PCI_HEADER_TYPE & ~3)) { + /* + * Change Header Type of PCI Bridge device to Type 1 + * (0x01, used by PCI Bridges) because hardwired value + * is Type 0 (0x00, used by Endpoint devices). + */ + data &= ~0x007f0000; + data |= PCI_HEADER_TYPE_BRIDGE << 16; + } + + if ((offset & ~3) == ADVK_ROOT_PORT_PCI_EXP_OFF + PCI_EXP_RTCTL) { + /* CRSSVE bit is stored only in cache */ + if (pcie->cfgcrssve) + data |= PCI_EXP_RTCTL_CRSSVE; + } + + if ((offset & ~3) == ADVK_ROOT_PORT_PCI_EXP_OFF + (PCI_EXP_RTCAP & ~3)) { + /* CRS is emulated below, so set CRSVIS capability */ + data |= PCI_EXP_RTCAP_CRSVIS << 16; + } + + *valuep = pci_conv_32_to_size(data, offset, size); + + return 0; + } + + /* + * Returning fabricated CRS value (0xFFFF0001) by PCIe Root Complex to + * OS is allowed only for 4-byte PCI_VENDOR_ID config read request and + * only when CRSSVE bit in Root Port PCIe device is enabled. In all + * other error PCIe Root Complex must return all-ones. 
+ * + * U-Boot currently does not support handling of CRS return value for + * PCI_VENDOR_ID config read request and also does not set CRSSVE bit. + * So it means that pcie->cfgcrssve is false. But the code is prepared + * for returning CRS, so that if U-Boot does support CRS in the future, + * it will work for Aardvark. + */ + allow_crs = (offset == PCI_VENDOR_ID) && (size == PCI_SIZE_32) && pcie->cfgcrssve; + + if (advk_readl(pcie, ADVK_PIO_START)) { + dev_err(pcie->dev, + "Previous PIO read/write transfer is still running\n"); + if (allow_crs) { + *valuep = CFG_RD_CRS_VAL; + return 0; + } + *valuep = pci_get_ff(size); + return -EAGAIN; + } + + /* Program the control register */ + reg = advk_readl(pcie, ADVK_PIO_CTRL); + reg &= ~ADVK_PIO_CTRL_TYPE_MASK; + if (busno == pcie->sec_busno) + reg |= ADVK_PIO_CTRL_TYPE_RD_TYPE0 << ADVK_PIO_CTRL_TYPE_SHIFT; + else + reg |= ADVK_PIO_CTRL_TYPE_RD_TYPE1 << ADVK_PIO_CTRL_TYPE_SHIFT; + advk_writel(pcie, reg, ADVK_PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_ECAM_OFFSET(busno, PCI_DEV(bdf), PCI_FUNC(bdf), (offset & ~0x3)); + advk_writel(pcie, reg, ADVK_PIO_ADDR_LS); + advk_writel(pcie, 0, ADVK_PIO_ADDR_MS); + + /* Program the data strobe */ + advk_writel(pcie, 0xf, ADVK_PIO_WR_DATA_STRB); + + retry_count = 0; + +retry: + /* Start the transfer */ + advk_writel(pcie, 1, ADVK_PIO_ISR); + advk_writel(pcie, 1, ADVK_PIO_START); + + ret = pcie_advk_wait_pio(pcie); + if (ret < 0) { + if (allow_crs) { + *valuep = CFG_RD_CRS_VAL; + return 0; + } + *valuep = pci_get_ff(size); + return ret; + } + + retry_count += ret; + + /* Check PIO status and get the read result */ + ret = pcie_advk_check_pio_status(pcie, allow_crs, ®); + if (ret == -EAGAIN && retry_count < PIO_MAX_RETRIES) + goto retry; + if (ret) { + *valuep = pci_get_ff(size); + return ret; + } + + dev_dbg(pcie->dev, "(addr,size,val)=(0x%04x, %d, 0x%08x)\n", + offset, size, reg); + *valuep = pci_conv_32_to_size(reg, offset, size); + + return 0; +} + +/** + * pcie_calc_datastrobe() - Calculate data strobe + * + * @offset: The offset into the device's configuration space + * @size: Indicates the size of access to perform + * + * Calculate data strobe according to offset and size + * + */ +static uint pcie_calc_datastrobe(uint offset, enum pci_size_t size) +{ + uint bytes, data_strobe; + + switch (size) { + case PCI_SIZE_8: + bytes = 1; + break; + case PCI_SIZE_16: + bytes = 2; + break; + default: + bytes = 4; + } + + data_strobe = GENMASK(bytes - 1, 0) << (offset & 0x3); + + return data_strobe; +} + +/** + * pcie_advk_write_config() - Write to configuration space + * + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @value: The value to write + * @size: Indicates the size of access to perform + * + * Write the value @value of size @size from offset @offset within the + * configuration space of the device identified by the bus, device & function + * numbers in @bdf on the PCI bus @bus. 
+ * + * Return: 0 on success + */ +static int pcie_advk_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct pcie_advk *pcie = dev_get_priv(bus); + int busno = PCI_BUS(bdf) - dev_seq(bus); + int retry_count; + ulong data; + uint reg; + int ret; + + dev_dbg(pcie->dev, "PCIE CFG write: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + dev_dbg(pcie->dev, "(addr,size,val)=(0x%04x, %d, 0x%08lx)\n", + offset, size, value); + + if (!pcie_advk_addr_valid(pcie, busno, PCI_DEV(bdf), PCI_FUNC(bdf))) { + dev_dbg(pcie->dev, "- out of range\n"); + return 0; + } + + /* + * As explained in pcie_advk_read_config(), PCI Bridge config registers + * are available directly in Aardvark memory space starting at offset + * zero. Type 1 specific registers are not available, so we write their + * content only into driver virtual cfgcache[]. + */ + if (busno == 0) { + if ((offset >= 0x10 && offset < 0x34) || + (offset >= 0x38 && offset < 0x3c)) { + data = pcie->cfgcache[(offset - 0x10) / 4]; + data = pci_conv_size_to_32(data, value, offset, size); + /* This PCI bridge does not have configurable bars */ + if ((offset & ~3) == PCI_BASE_ADDRESS_0 || + (offset & ~3) == PCI_BASE_ADDRESS_1 || + (offset & ~3) == PCI_ROM_ADDRESS1) + data = 0x0; + pcie->cfgcache[(offset - 0x10) / 4] = data; + } else { + data = advk_readl(pcie, ADVK_ROOT_PORT_PCI_CFG_OFF + (offset & ~3)); + data = pci_conv_size_to_32(data, value, offset, size); + advk_writel(pcie, data, ADVK_ROOT_PORT_PCI_CFG_OFF + (offset & ~3)); + } + + if (offset == PCI_SECONDARY_BUS || + (offset == PCI_PRIMARY_BUS && size != PCI_SIZE_8)) + pcie->sec_busno = (data >> 8) & 0xff; + + if ((offset & ~3) == ADVK_ROOT_PORT_PCI_EXP_OFF + PCI_EXP_RTCTL) + pcie->cfgcrssve = data & PCI_EXP_RTCTL_CRSSVE; + + return 0; + } + + if (advk_readl(pcie, ADVK_PIO_START)) { + dev_err(pcie->dev, + "Previous PIO read/write transfer is still running\n"); + return -EAGAIN; + } + + /* Program the control register */ + reg = advk_readl(pcie, ADVK_PIO_CTRL); + reg &= ~ADVK_PIO_CTRL_TYPE_MASK; + if (busno == pcie->sec_busno) + reg |= ADVK_PIO_CTRL_TYPE_WR_TYPE0 << ADVK_PIO_CTRL_TYPE_SHIFT; + else + reg |= ADVK_PIO_CTRL_TYPE_WR_TYPE1 << ADVK_PIO_CTRL_TYPE_SHIFT; + advk_writel(pcie, reg, ADVK_PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_ECAM_OFFSET(busno, PCI_DEV(bdf), PCI_FUNC(bdf), (offset & ~0x3)); + advk_writel(pcie, reg, ADVK_PIO_ADDR_LS); + advk_writel(pcie, 0, ADVK_PIO_ADDR_MS); + dev_dbg(pcie->dev, "\tPIO req. - addr = 0x%08x\n", reg); + + /* Program the data register */ + reg = pci_conv_size_to_32(0, value, offset, size); + advk_writel(pcie, reg, ADVK_PIO_WR_DATA); + dev_dbg(pcie->dev, "\tPIO req. - val = 0x%08x\n", reg); + + /* Program the data strobe */ + reg = pcie_calc_datastrobe(offset, size); + advk_writel(pcie, reg, ADVK_PIO_WR_DATA_STRB); + dev_dbg(pcie->dev, "\tPIO req. - strb = 0x%02x\n", reg); + + retry_count = 0; + +retry: + /* Start the transfer */ + advk_writel(pcie, 1, ADVK_PIO_ISR); + advk_writel(pcie, 1, ADVK_PIO_START); + + ret = pcie_advk_wait_pio(pcie); + if (ret < 0) + return ret; + + retry_count += ret; + + /* Check PIO status */ + ret = pcie_advk_check_pio_status(pcie, false, NULL); + if (ret == -EAGAIN && retry_count < PIO_MAX_RETRIES) + goto retry; + return ret; +} + +/** + * pcie_advk_wait_for_link() - Wait for link training to be accomplished + * + * @pcie: The PCI device to access + * + * Wait up to 1 second for link training to be accomplished. 
+ */ +static void pcie_advk_wait_for_link(struct pcie_advk *pcie) +{ + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_MAX_RETRIES; retries++) { + if (pcie_advk_link_up(pcie)) { + printf("PCIe: Link up\n"); + return; + } + + udelay(LINK_WAIT_TIMEOUT); + } + + printf("PCIe: Link down\n"); +} + +/* + * Set PCIe address window register which could be used for memory + * mapping. + */ +static void pcie_advk_set_ob_win(struct pcie_advk *pcie, u8 win_num, + phys_addr_t match, phys_addr_t remap, + phys_addr_t mask, u32 actions) +{ + advk_writel(pcie, ADVK_OB_WIN_ENABLE | + lower_32_bits(match), ADVK_OB_WIN_MATCH_LS(win_num)); + advk_writel(pcie, upper_32_bits(match), ADVK_OB_WIN_MATCH_MS(win_num)); + advk_writel(pcie, lower_32_bits(remap), ADVK_OB_WIN_REMAP_LS(win_num)); + advk_writel(pcie, upper_32_bits(remap), ADVK_OB_WIN_REMAP_MS(win_num)); + advk_writel(pcie, lower_32_bits(mask), ADVK_OB_WIN_MASK_LS(win_num)); + advk_writel(pcie, upper_32_bits(mask), ADVK_OB_WIN_MASK_MS(win_num)); + advk_writel(pcie, actions, ADVK_OB_WIN_ACTIONS(win_num)); +} + +static void pcie_advk_disable_ob_win(struct pcie_advk *pcie, u8 win_num) +{ + advk_writel(pcie, 0, ADVK_OB_WIN_MATCH_LS(win_num)); + advk_writel(pcie, 0, ADVK_OB_WIN_MATCH_MS(win_num)); + advk_writel(pcie, 0, ADVK_OB_WIN_REMAP_LS(win_num)); + advk_writel(pcie, 0, ADVK_OB_WIN_REMAP_MS(win_num)); + advk_writel(pcie, 0, ADVK_OB_WIN_MASK_LS(win_num)); + advk_writel(pcie, 0, ADVK_OB_WIN_MASK_MS(win_num)); + advk_writel(pcie, 0, ADVK_OB_WIN_ACTIONS(win_num)); +} + +static void pcie_advk_set_ob_region(struct pcie_advk *pcie, int *wins, + struct pci_region *region, u32 actions) +{ + phys_addr_t phys_start = region->phys_start; + pci_addr_t bus_start = region->bus_start; + pci_size_t size = region->size; + phys_addr_t win_mask; + u64 win_size; + + if (*wins == -1) + return; + + /* + * The n-th PCIe window is configured by tuple (match, remap, mask) + * and an access to address A uses this window if A matches the + * match with given mask. + * So every PCIe window size must be a power of two and every start + * address must be aligned to window size. Minimal size is 64 KiB + * because lower 16 bits of mask must be zero. Remapped address + * may have set only bits from the mask. + */ + while (*wins < ADVK_OB_WIN_COUNT && size > 0) { + /* Calculate the largest aligned window size */ + win_size = (1ULL << (fls64(size) - 1)) | + (phys_start ? 
(1ULL << __ffs64(phys_start)) : 0); + win_size = 1ULL << __ffs64(win_size); + win_mask = ~(win_size - 1); + if (win_size < 0x10000 || (bus_start & ~win_mask)) + break; + + dev_dbg(pcie->dev, + "Configuring PCIe window %d: [0x%llx-0x%llx] as 0x%x\n", + *wins, (u64)phys_start, (u64)phys_start + win_size, + actions); + pcie_advk_set_ob_win(pcie, *wins, phys_start, bus_start, + win_mask, actions); + + phys_start += win_size; + bus_start += win_size; + size -= win_size; + (*wins)++; + } + + if (size > 0) { + *wins = -1; + dev_err(pcie->dev, + "Invalid PCIe region [0x%llx-0x%llx]\n", + (u64)region->phys_start, + (u64)region->phys_start + region->size); + } +} + +/** + * pcie_advk_setup_hw() - PCIe initailzation + * + * @pcie: The PCI device to access + * + * Return: 0 on success + */ +static int pcie_advk_setup_hw(struct pcie_advk *pcie) +{ + struct pci_region *io, *mem, *pref; + int i, wins; + u32 reg; + + /* Set from Command to Direct mode */ + reg = advk_readl(pcie, ADVK_CORE_CTRL_CONFIG); + reg &= ~ADVK_CORE_CTRL_CONFIG_COMMAND_MODE; + advk_writel(pcie, reg, ADVK_CORE_CTRL_CONFIG); + + /* Set PCI global control register to RC mode */ + reg = advk_readl(pcie, ADVK_GLOBAL_CTRL0); + reg |= ADVK_GLOBAL_CTRL0_IS_RC; + advk_writel(pcie, reg, ADVK_GLOBAL_CTRL0); + + /* + * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab. + * ADVK_LMI_VENDOR_ID contains vendor id in low 16 bits and subsystem vendor + * id in high 16 bits. Updating this register changes readback value of + * read-only vendor id bits in Root Port PCI_VENDOR_ID register. Workaround + * for erratum 4.1: "The value of device and vendor ID is incorrect". + */ + advk_writel(pcie, 0x11ab11ab, ADVK_LMI_VENDOR_ID); + + /* + * Change Class Code of PCI Bridge device to PCI Bridge (0x600400), + * because default value is Mass Storage Controller (0x010400), causing + * U-Boot to fail to recognize it as P2P Bridge. + * + * Note that this Aardvark PCI Bridge does not have a compliant Type 1 + * Configuration Space and it even cannot be accessed via Aardvark's + * PCI config space access method. Aardvark PCI Bridge Config space is + * available in internal Aardvark registers starting at offset 0x0 + * and has format of Type 0 config space. + * + * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34) + * have the same format in Marvell's specification as in PCIe + * specification, but their meaning is totally different (and not even + * the same meaning as explained in the corresponding comment in the + * pci_mvebu driver; aardvark is still different). + * + * So our driver converts Type 0 config space to Type 1 and reports + * Header Type as Type 1. Access to BAR registers and to non-existent + * Type 1 registers is redirected to the virtual cfgcache[] buffer, + * which avoids changing unrelated registers. 
+ */ + reg = advk_readl(pcie, ADVK_ROOT_PORT_PCI_CFG_OFF + PCI_CLASS_REVISION); + reg &= ~0xffffff00; + reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; + advk_writel(pcie, reg, ADVK_ROOT_PORT_PCI_CFG_OFF + PCI_CLASS_REVISION); + + /* Enable generation and checking of ECRC on PCIe Root Port */ + reg = advk_readl(pcie, ADVK_ROOT_PORT_PCI_ERR_OFF + PCI_ERR_CAP); + reg |= PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE; + advk_writel(pcie, reg, ADVK_ROOT_PORT_PCI_ERR_OFF + PCI_ERR_CAP); + + /* Set PCIe Device Control register on PCIe Root Port */ + reg = advk_readl(pcie, ADVK_ROOT_PORT_PCI_EXP_OFF + PCI_EXP_DEVCTL); + reg &= ~PCI_EXP_DEVCTL_RELAX_EN; + reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; + reg &= ~PCI_EXP_DEVCTL_PAYLOAD; + reg &= ~PCI_EXP_DEVCTL_READRQ; + reg |= PCI_EXP_DEVCTL_PAYLOAD_512B; + reg |= PCI_EXP_DEVCTL_READRQ_512B; + advk_writel(pcie, reg, ADVK_ROOT_PORT_PCI_EXP_OFF + PCI_EXP_DEVCTL); + + /* Program PCIe Control 2 to disable strict ordering */ + reg = advk_readl(pcie, ADVK_GLOBAL_CTRL2); + reg &= ~ADVK_GLOBAL_CTRL2_STRICT_ORDER_EN; + advk_writel(pcie, reg, ADVK_GLOBAL_CTRL2); + + /* Set GEN2 */ + reg = advk_readl(pcie, ADVK_GLOBAL_CTRL0); + reg &= ~ADVK_GLOBAL_CTRL0_SPEED_GEN_MASK; + reg |= ADVK_GLOBAL_CTRL0_SPEED_GEN_2 << ADVK_GLOBAL_CTRL0_SPEED_GEN_SHIFT; + advk_writel(pcie, reg, ADVK_GLOBAL_CTRL0); + + /* Set lane X1 */ + reg = advk_readl(pcie, ADVK_GLOBAL_CTRL0); + reg &= ~ADVK_GLOBAL_CTRL0_LANE_COUNT_MASK; + reg |= ADVK_GLOBAL_CTRL0_LANE_COUNT_1 << ADVK_GLOBAL_CTRL0_LANE_COUNT_SHIFT; + advk_writel(pcie, reg, ADVK_GLOBAL_CTRL0); + + /* Enable link training */ + reg = advk_readl(pcie, ADVK_GLOBAL_CTRL0); + reg |= ADVK_GLOBAL_CTRL0_LINK_TRAINING_EN; + advk_writel(pcie, reg, ADVK_GLOBAL_CTRL0); + + /* + * Enable AXI address window location generation: + * When it is enabled, the default outbound window + * configurations (Default User Field: 0xD0074CFC) + * are used to transparent address translation for + * the outbound transactions. Thus, PCIe address + * windows are not required for transparent memory + * access when default outbound window configuration + * is set for memory access. + */ + reg = advk_readl(pcie, ADVK_GLOBAL_CTRL2); + reg |= ADVK_GLOBAL_CTRL2_ADDRWIN_MAP_EN; + advk_writel(pcie, reg, ADVK_GLOBAL_CTRL2); + + /* + * Bypass the address window mapping for PIO: + * Since PIO access already contains all required + * info over AXI interface by PIO registers, the + * address window is not required. + */ + reg = advk_readl(pcie, ADVK_PIO_CTRL); + reg |= ADVK_PIO_CTRL_ADDR_WIN_DISABLE; + advk_writel(pcie, reg, ADVK_PIO_CTRL); + + /* + * Set memory access in Default User Field so it + * is not required to configure PCIe address for + * transparent memory access. + */ + advk_writel(pcie, ADVK_OB_WIN_TYPE_MEM, ADVK_OB_WIN_DEFAULT_ACTIONS); + + /* + * Configure PCIe address windows for non-memory or + * non-transparent access as by default PCIe uses + * transparent memory access. + */ + wins = 0; + pci_get_regions(pcie->dev, &io, &mem, &pref); + if (io) + pcie_advk_set_ob_region(pcie, &wins, io, ADVK_OB_WIN_TYPE_IO); + if (mem && mem->phys_start != mem->bus_start) + pcie_advk_set_ob_region(pcie, &wins, mem, ADVK_OB_WIN_TYPE_MEM); + if (pref && pref->phys_start != pref->bus_start) + pcie_advk_set_ob_region(pcie, &wins, pref, ADVK_OB_WIN_TYPE_MEM); + + /* Disable remaining PCIe outbound windows */ + for (i = ((wins >= 0) ? 
wins : 0); i < ADVK_OB_WIN_COUNT; i++) + pcie_advk_disable_ob_win(pcie, i); + + if (wins == -1) + return -EINVAL; + + /* Wait for PCIe link up */ + pcie_advk_wait_for_link(pcie); + + return 0; +} + +/** + * pcie_advk_probe() - Probe the PCIe bus for active link + * + * @dev: A pointer to the device being operated on + * + * Probe for an active link on the PCIe bus and configure the controller + * to enable this port. + * + * Return: 0 on success, else -ENODEV + */ +static int pcie_advk_probe(struct udevice *dev) +{ + struct pcie_advk *pcie = dev_get_priv(dev); + + gpio_request_by_name(dev, "reset-gpios", 0, &pcie->reset_gpio, + GPIOD_IS_OUT); + /* + * Issue reset to add-in card through the dedicated GPIO. + * Some boards are connecting the card reset pin to common system + * reset wire and others are using separate GPIO port. + * In the last case we have to release a reset of the addon card + * using this GPIO. + * + * FIX-ME: + * The PCIe RESET signal is not supposed to be released along + * with the SOC RESET signal. It should be lowered as early as + * possible before PCIe PHY initialization. Moreover, the PCIe + * clock should be gated as well. + */ + if (dm_gpio_is_valid(&pcie->reset_gpio)) { + dev_dbg(dev, "Toggle PCIE Reset GPIO ...\n"); + dm_gpio_set_value(&pcie->reset_gpio, 1); + mdelay(200); + dm_gpio_set_value(&pcie->reset_gpio, 0); + } else { + dev_warn(dev, "PCIE Reset on GPIO support is missing\n"); + } + + pcie->dev = pci_get_controller(dev); + + /* PCI Bridge support 32-bit I/O and 64-bit prefetch mem addressing */ + pcie->cfgcache[(PCI_IO_BASE - 0x10) / 4] = + PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8); + pcie->cfgcache[(PCI_PREF_MEMORY_BASE - 0x10) / 4] = + PCI_PREF_RANGE_TYPE_64 | (PCI_PREF_RANGE_TYPE_64 << 16); + + return pcie_advk_setup_hw(pcie); +} + +static int pcie_advk_remove(struct udevice *dev) +{ + struct pcie_advk *pcie = dev_get_priv(dev); + u32 reg; + int i; + + for (i = 0; i < ADVK_OB_WIN_COUNT; i++) + pcie_advk_disable_ob_win(pcie, i); + + reg = advk_readl(pcie, ADVK_ROOT_PORT_PCI_CFG_OFF + PCI_COMMAND); + reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + advk_writel(pcie, reg, ADVK_ROOT_PORT_PCI_CFG_OFF + PCI_COMMAND); + + reg = advk_readl(pcie, ADVK_GLOBAL_CTRL0); + reg &= ~ADVK_GLOBAL_CTRL0_LINK_TRAINING_EN; + advk_writel(pcie, reg, ADVK_GLOBAL_CTRL0); + + return 0; +} + +/** + * pcie_advk_of_to_plat() - Translate from DT to device state + * + * @dev: A pointer to the device being operated on + * + * Translate relevant data from the device tree pertaining to device @dev into + * state that the driver will later make use of. This state is stored in the + * device's private data structure. 
+ * + * Return: 0 on success, else -EINVAL + */ +static int pcie_advk_of_to_plat(struct udevice *dev) +{ + struct pcie_advk *pcie = dev_get_priv(dev); + + /* Get the register base address */ + pcie->base = dev_read_addr_ptr(dev); + if (!pcie->base) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops pcie_advk_ops = { + .read_config = pcie_advk_read_config, + .write_config = pcie_advk_write_config, +}; + +static const struct udevice_id pcie_advk_ids[] = { + { .compatible = "marvell,armada-3700-pcie" }, + { } +}; + +U_BOOT_DRIVER(pcie_advk) = { + .name = "pcie_advk", + .id = UCLASS_PCI, + .of_match = pcie_advk_ids, + .ops = &pcie_advk_ops, + .of_to_plat = pcie_advk_of_to_plat, + .probe = pcie_advk_probe, + .remove = pcie_advk_remove, + .flags = DM_FLAG_OS_PREPARE, + .priv_auto = sizeof(struct pcie_advk), +}; diff --git a/drivers/pci/pci-emul-uclass.c b/drivers/pci/pci-emul-uclass.c new file mode 100644 index 00000000000..166ee9fcd43 --- /dev/null +++ b/drivers/pci/pci-emul-uclass.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2014 Google, Inc + * Written by Simon Glass <sjg@chromium.org> + */ + +#include <dm.h> +#include <fdtdec.h> +#include <log.h> +#include <linux/libfdt.h> +#include <pci.h> +#include <dm/lists.h> + +struct sandbox_pci_emul_priv { + int dev_count; +}; + +int sandbox_pci_get_emul(const struct udevice *bus, pci_dev_t find_devfn, + struct udevice **containerp, struct udevice **emulp) +{ + struct pci_emul_uc_priv *upriv; + struct udevice *dev; + int ret; + + *containerp = NULL; + ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(find_devfn), &dev); + if (ret) { + debug("%s: Could not find emulator for dev %x\n", __func__, + find_devfn); + return ret; + } + *containerp = dev; + + ret = uclass_get_device_by_phandle(UCLASS_PCI_EMUL, dev, "sandbox,emul", + emulp); + if (!ret) { + upriv = dev_get_uclass_priv(*emulp); + + upriv->client = dev; + } else if (device_get_uclass_id(dev) != UCLASS_PCI_GENERIC) { + /* + * See commit 4345998ae9df, + * "pci: sandbox: Support dynamically binding device driver" + */ + *emulp = dev; + } + + return 0; +} + +int sandbox_pci_get_client(struct udevice *emul, struct udevice **devp) +{ + struct pci_emul_uc_priv *upriv = dev_get_uclass_priv(emul); + + if (!upriv->client) + return -ENOENT; + *devp = upriv->client; + + return 0; +} + +uint sandbox_pci_read_bar(u32 barval, int type, uint size) +{ + u32 result; + + result = barval; + if (result == 0xffffffff) { + if (type == PCI_BASE_ADDRESS_SPACE_IO) { + result = (~(size - 1) & + PCI_BASE_ADDRESS_IO_MASK) | + PCI_BASE_ADDRESS_SPACE_IO; + } else { + result = (~(size - 1) & + PCI_BASE_ADDRESS_MEM_MASK) | + PCI_BASE_ADDRESS_MEM_TYPE_32; + } + } + + return result; +} + +static int sandbox_pci_emul_post_probe(struct udevice *dev) +{ + struct sandbox_pci_emul_priv *priv = uclass_get_priv(dev->uclass); + + priv->dev_count++; + sandbox_set_enable_pci_map(true); + + return 0; +} + +static int sandbox_pci_emul_pre_remove(struct udevice *dev) +{ + struct sandbox_pci_emul_priv *priv = uclass_get_priv(dev->uclass); + + priv->dev_count--; + sandbox_set_enable_pci_map(priv->dev_count > 0); + + return 0; +} + +UCLASS_DRIVER(pci_emul) = { + .id = UCLASS_PCI_EMUL, + .name = "pci_emul", + .post_probe = sandbox_pci_emul_post_probe, + .pre_remove = sandbox_pci_emul_pre_remove, + .priv_auto = sizeof(struct sandbox_pci_emul_priv), + .per_device_auto = sizeof(struct pci_emul_uc_priv), +}; + +/* + * This uclass is a child of the pci bus. 
Its plat is not defined here so + * is defined by its parent, UCLASS_PCI, which uses struct pci_child_plat. + * See per_child_plat_auto in UCLASS_DRIVER(pci). + */ +UCLASS_DRIVER(pci_emul_parent) = { + .id = UCLASS_PCI_EMUL_PARENT, + .name = "pci_emul_parent", + .post_bind = dm_scan_fdt_dev, +}; + +static const struct udevice_id pci_emul_parent_ids[] = { + { .compatible = "sandbox,pci-emul-parent" }, + { } +}; + +U_BOOT_DRIVER(pci_emul_parent_drv) = { + .name = "pci_emul_parent_drv", + .id = UCLASS_PCI_EMUL_PARENT, + .of_match = pci_emul_parent_ids, +}; diff --git a/drivers/pci/pci-rcar-gen2.c b/drivers/pci/pci-rcar-gen2.c new file mode 100644 index 00000000000..12c31e74087 --- /dev/null +++ b/drivers/pci/pci-rcar-gen2.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RCar Gen2 PCIEC driver + * + * Copyright (C) 2018 Marek Vasut <marek.vasut@gmail.com> + */ + +#include <config.h> +#include <asm/io.h> +#include <clk.h> +#include <dm.h> +#include <errno.h> +#include <pci.h> +#include <linux/bitops.h> + +/* AHB-PCI Bridge PCI communication registers */ +#define RCAR_AHBPCI_PCICOM_OFFSET 0x800 + +#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00) +#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04) +#define RCAR_PCIAHB_PREFETCH0 0x0 +#define RCAR_PCIAHB_PREFETCH4 0x1 +#define RCAR_PCIAHB_PREFETCH8 0x2 +#define RCAR_PCIAHB_PREFETCH16 0x3 + +#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10) +#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14) +#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1) +#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1) +#define RCAR_AHBPCI_WIN1_HOST BIT(30) +#define RCAR_AHBPCI_WIN1_DEVICE BIT(31) + +#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20) +#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24) +#define RCAR_PCI_INT_SIGTABORT BIT(0) +#define RCAR_PCI_INT_SIGRETABORT BIT(1) +#define RCAR_PCI_INT_REMABORT BIT(2) +#define RCAR_PCI_INT_PERR BIT(3) +#define RCAR_PCI_INT_SIGSERR BIT(4) +#define RCAR_PCI_INT_RESERR BIT(5) +#define RCAR_PCI_INT_WIN1ERR BIT(12) +#define RCAR_PCI_INT_WIN2ERR BIT(13) +#define RCAR_PCI_INT_A BIT(16) +#define RCAR_PCI_INT_B BIT(17) +#define RCAR_PCI_INT_PME BIT(19) +#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \ + RCAR_PCI_INT_SIGRETABORT | \ + RCAR_PCI_INT_SIGRETABORT | \ + RCAR_PCI_INT_REMABORT | \ + RCAR_PCI_INT_PERR | \ + RCAR_PCI_INT_SIGSERR | \ + RCAR_PCI_INT_RESERR | \ + RCAR_PCI_INT_WIN1ERR | \ + RCAR_PCI_INT_WIN2ERR) + +#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30) +#define RCAR_AHB_BUS_MMODE_HTRANS BIT(0) +#define RCAR_AHB_BUS_MMODE_BYTE_BURST BIT(1) +#define RCAR_AHB_BUS_MMODE_WR_INCR BIT(2) +#define RCAR_AHB_BUS_MMODE_HBUS_REQ BIT(7) +#define RCAR_AHB_BUS_SMODE_READYCTR BIT(17) +#define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \ + RCAR_AHB_BUS_MMODE_BYTE_BURST | \ + RCAR_AHB_BUS_MMODE_WR_INCR | \ + RCAR_AHB_BUS_MMODE_HBUS_REQ | \ + RCAR_AHB_BUS_SMODE_READYCTR) + +#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34) +#define RCAR_USBCTR_USBH_RST BIT(0) +#define RCAR_USBCTR_PCICLK_MASK BIT(1) +#define RCAR_USBCTR_PLL_RST BIT(2) +#define RCAR_USBCTR_DIRPD BIT(8) +#define RCAR_USBCTR_PCIAHB_WIN2_EN BIT(9) +#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10) + +#define RCAR_PCI_ARBITER_CTR_REG 
(RCAR_AHBPCI_PCICOM_OFFSET + 0x40) +#define RCAR_PCI_ARBITER_PCIREQ0 BIT(0) +#define RCAR_PCI_ARBITER_PCIREQ1 BIT(1) +#define RCAR_PCI_ARBITER_PCIBP_MODE BIT(12) + +#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48) + +struct rcar_gen2_pci_priv { + fdt_addr_t cfg_base; + fdt_addr_t mem_base; +}; + +static int rcar_gen2_pci_addr_valid(pci_dev_t d, uint offset) +{ + u32 slot; + + if (PCI_FUNC(d)) + return -EINVAL; + + /* Only one EHCI/OHCI device built-in */ + slot = PCI_DEV(d); + if (slot != 1 && slot != 2) + return -EINVAL; + + /* bridge logic only has registers to 0x40 */ + if (slot == 0x0 && offset >= 0x40) + return -EINVAL; + + return 0; +} + +static u32 get_bus_address(const struct udevice *dev, pci_dev_t bdf, u32 offset) +{ + struct rcar_gen2_pci_priv *priv = dev_get_priv(dev); + + return priv->cfg_base + (PCI_DEV(bdf) >> 1) * 0x100 + (offset & ~3); +} + +static u32 setup_bus_address(struct udevice *dev, pci_dev_t bdf, u32 offset) +{ + struct rcar_gen2_pci_priv *priv = dev_get_priv(dev); + u32 reg; + + reg = PCI_DEV(bdf) ? RCAR_AHBPCI_WIN1_DEVICE : RCAR_AHBPCI_WIN1_HOST; + reg |= RCAR_AHBPCI_WIN_CTR_CFG; + writel(reg, priv->cfg_base + RCAR_AHBPCI_WIN1_CTR_REG); + + return get_bus_address(dev, bdf, offset); +} + +static int rcar_gen2_pci_read_config(const struct udevice *dev, pci_dev_t bdf, + uint offset, ulong *value, + enum pci_size_t size) +{ + u32 addr, reg; + int ret; + + ret = rcar_gen2_pci_addr_valid(bdf, offset); + if (ret) { + *value = pci_get_ff(size); + return 0; + } + + addr = get_bus_address(dev, bdf, offset); + reg = readl(addr); + *value = pci_conv_32_to_size(reg, offset, size); + + return 0; +} + +static int rcar_gen2_pci_write_config(struct udevice *dev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + u32 addr, reg, old; + int ret; + + ret = rcar_gen2_pci_addr_valid(bdf, offset); + if (ret) + return ret; + + addr = get_bus_address(dev, bdf, offset); + + old = readl(addr); + reg = pci_conv_size_to_32(old, value, offset, size); + writel(reg, addr); + + return 0; +} + +static int rcar_gen2_pci_probe(struct udevice *dev) +{ + struct rcar_gen2_pci_priv *priv = dev_get_priv(dev); + struct clk pci_clk; + u32 devad; + int ret; + + ret = clk_get_by_index(dev, 0, &pci_clk); + if (ret) + return ret; + + ret = clk_enable(&pci_clk); + if (ret) + return ret; + + /* Clock & Reset & Direct Power Down */ + clrsetbits_le32(priv->cfg_base + RCAR_USBCTR_REG, + RCAR_USBCTR_DIRPD | RCAR_USBCTR_PCICLK_MASK | + RCAR_USBCTR_USBH_RST, + RCAR_USBCTR_PCIAHB_WIN1_1G); + clrbits_le32(priv->cfg_base + RCAR_USBCTR_REG, RCAR_USBCTR_PLL_RST); + + /* AHB-PCI Bridge Communication Registers */ + writel(RCAR_AHB_BUS_MODE, priv->cfg_base + RCAR_AHB_BUS_CTR_REG); + writel((CFG_SYS_SDRAM_BASE & 0xf0000000) | RCAR_PCIAHB_PREFETCH16, + priv->cfg_base + RCAR_PCIAHB_WIN1_CTR_REG); + writel(0xf0000000 | RCAR_PCIAHB_PREFETCH16, + priv->cfg_base + RCAR_PCIAHB_WIN2_CTR_REG); + writel(priv->mem_base | RCAR_AHBPCI_WIN_CTR_MEM, + priv->cfg_base + RCAR_AHBPCI_WIN2_CTR_REG); + setbits_le32(priv->cfg_base + RCAR_PCI_ARBITER_CTR_REG, + RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 | + RCAR_PCI_ARBITER_PCIBP_MODE); + + /* PCI Configuration Registers for AHBPCI */ + devad = setup_bus_address(dev, PCI_BDF(0, 0, 0), 0); + writel(priv->cfg_base + 0x800, devad + PCI_BASE_ADDRESS_0); + writel(CFG_SYS_SDRAM_BASE & 0xf0000000, devad + PCI_BASE_ADDRESS_1); + writel(0xf0000000, devad + PCI_BASE_ADDRESS_2); + writel(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_PARITY | 
PCI_COMMAND_SERR, + devad + PCI_COMMAND); + + /* PCI Configuration Registers for OHCI */ + devad = setup_bus_address(dev, PCI_BDF(0, 1, 0), 0); + writel(priv->mem_base + 0x0, devad + PCI_BASE_ADDRESS_0); + writel(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_PARITY | PCI_COMMAND_SERR, + devad + PCI_COMMAND); + + /* PCI Configuration Registers for EHCI */ + devad = setup_bus_address(dev, PCI_BDF(0, 2, 0), 0); + writel(priv->mem_base + 0x1000, devad + PCI_BASE_ADDRESS_0); + writel(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_PARITY | PCI_COMMAND_SERR, + devad + PCI_COMMAND); + + /* Enable PCI interrupt */ + setbits_le32(priv->cfg_base + RCAR_PCI_INT_ENABLE_REG, + RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME); + + return 0; +} + +static int rcar_gen2_pci_of_to_plat(struct udevice *dev) +{ + struct rcar_gen2_pci_priv *priv = dev_get_priv(dev); + + priv->cfg_base = devfdt_get_addr_index(dev, 0); + priv->mem_base = devfdt_get_addr_index(dev, 1); + if (!priv->cfg_base || !priv->mem_base) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops rcar_gen2_pci_ops = { + .read_config = rcar_gen2_pci_read_config, + .write_config = rcar_gen2_pci_write_config, +}; + +static const struct udevice_id rcar_gen2_pci_ids[] = { + { .compatible = "renesas,pci-rcar-gen2" }, + { } +}; + +U_BOOT_DRIVER(rcar_gen2_pci) = { + .name = "rcar_gen2_pci", + .id = UCLASS_PCI, + .of_match = rcar_gen2_pci_ids, + .ops = &rcar_gen2_pci_ops, + .probe = rcar_gen2_pci_probe, + .of_to_plat = rcar_gen2_pci_of_to_plat, + .priv_auto = sizeof(struct rcar_gen2_pci_priv), +}; diff --git a/drivers/pci/pci-rcar-gen3.c b/drivers/pci/pci-rcar-gen3.c new file mode 100644 index 00000000000..76878246f1e --- /dev/null +++ b/drivers/pci/pci-rcar-gen3.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RCar Gen3 PCIEC driver + * + * Copyright (C) 2018-2019 Marek Vasut <marek.vasut@gmail.com> + * + * Based on Linux PCIe driver for Renesas R-Car SoCs + * Copyright (C) 2014 Renesas Electronics Europe Ltd + * + * Based on: + * arch/sh/drivers/pci/pcie-sh7786.c + * arch/sh/drivers/pci/ops-sh7786.c + * Copyright (C) 2009 - 2011 Paul Mundt + * + * Author: Phil Edworthy <phil.edworthy@renesas.com> + */ + +#include <asm/io.h> +#include <clk.h> +#include <dm.h> +#include <errno.h> +#include <pci.h> +#include <wait_bit.h> +#include <linux/bitops.h> +#include <linux/log2.h> + +#define PCIECAR 0x000010 +#define PCIECCTLR 0x000018 +#define SEND_ENABLE BIT(31) +#define TYPE0 (0 << 8) +#define TYPE1 BIT(8) +#define PCIECDR 0x000020 +#define PCIEMSR 0x000028 +#define PCIEINTXR 0x000400 +#define PCIEPHYSR 0x0007f0 +#define PHYRDY BIT(0) +#define PCIEMSITXR 0x000840 + +/* Transfer control */ +#define PCIETCTLR 0x02000 +#define CFINIT 1 +#define PCIETSTR 0x02004 +#define DATA_LINK_ACTIVE 1 +#define PCIEERRFR 0x02020 +#define UNSUPPORTED_REQUEST BIT(4) +#define PCIEMSIFR 0x02044 +#define PCIEMSIALR 0x02048 +#define MSIFE 1 +#define PCIEMSIAUR 0x0204c +#define PCIEMSIIER 0x02050 + +/* root port address */ +#define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) + +/* local address reg & mask */ +#define PCIELAR(x) (0x02200 + ((x) * 0x20)) +#define PCIELAMR(x) (0x02208 + ((x) * 0x20)) +#define LAM_PREFETCH BIT(3) +#define LAM_64BIT BIT(2) +#define LAR_ENABLE BIT(1) + +/* PCIe address reg & mask */ +#define PCIEPALR(x) (0x03400 + ((x) * 0x20)) +#define PCIEPAUR(x) (0x03404 + ((x) * 0x20)) +#define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) +#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) +#define PAR_ENABLE BIT(31) +#define 
IO_SPACE BIT(8) + +/* Configuration */ +#define PCICONF(x) (0x010000 + ((x) * 0x4)) +#define PMCAP(x) (0x010040 + ((x) * 0x4)) +#define EXPCAP(x) (0x010070 + ((x) * 0x4)) +#define VCCAP(x) (0x010100 + ((x) * 0x4)) + +/* link layer */ +#define IDSETR1 0x011004 +#define TLCTLR 0x011048 +#define MACSR 0x011054 +#define SPCHGFIN BIT(4) +#define SPCHGFAIL BIT(6) +#define SPCHGSUC BIT(7) +#define LINK_SPEED (0xf << 16) +#define LINK_SPEED_2_5GTS (1 << 16) +#define LINK_SPEED_5_0GTS (2 << 16) +#define MACCTLR 0x011058 +#define SPEED_CHANGE BIT(24) +#define SCRAMBLE_DISABLE BIT(27) +#define MACS2R 0x011078 +#define MACCGSPSETR 0x011084 +#define SPCNGRSN BIT(31) + +/* R-Car H1 PHY */ +#define H1_PCIEPHYADRR 0x04000c +#define WRITE_CMD BIT(16) +#define PHY_ACK BIT(24) +#define RATE_POS 12 +#define LANE_POS 8 +#define ADR_POS 0 +#define H1_PCIEPHYDOUTR 0x040014 + +/* R-Car Gen2 PHY */ +#define GEN2_PCIEPHYADDR 0x780 +#define GEN2_PCIEPHYDATA 0x784 +#define GEN2_PCIEPHYCTRL 0x78c + +#define INT_PCI_MSI_NR 32 + +#define RCONF(x) (PCICONF(0) + (x)) +#define RPMCAP(x) (PMCAP(0) + (x)) +#define REXPCAP(x) (EXPCAP(0) + (x)) +#define RVCCAP(x) (VCCAP(0) + (x)) + +#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) +#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) +#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) + +#define RCAR_PCI_MAX_RESOURCES 4 +#define MAX_NR_INBOUND_MAPS 6 + +enum { + RCAR_PCI_ACCESS_READ, + RCAR_PCI_ACCESS_WRITE, +}; + +struct rcar_gen3_pcie_priv { + fdt_addr_t regs; +}; + +static void rcar_rmw32(struct udevice *dev, int where, u32 mask, u32 data) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(dev); + int shift = 8 * (where & 3); + + clrsetbits_le32(priv->regs + (where & ~3), + mask << shift, data << shift); +} + +static u32 rcar_read_conf(const struct udevice *dev, int where) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(dev); + int shift = 8 * (where & 3); + + return readl(priv->regs + (where & ~3)) >> shift; +} + +static int rcar_pcie_config_access(const struct udevice *udev, + unsigned char access_type, + pci_dev_t bdf, int where, ulong *data) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(udev); + u32 reg = where & ~3; + + /* Root bus */ + if (PCI_DEV(bdf) == 0) { + if (access_type == RCAR_PCI_ACCESS_READ) + *data = readl(priv->regs + PCICONF(where / 4)); + else + writel(*data, priv->regs + PCICONF(where / 4)); + + return 0; + } + + /* Clear errors */ + clrbits_le32(priv->regs + PCIEERRFR, 0); + + /* Set the PIO address */ + writel((bdf << 8) | reg, priv->regs + PCIECAR); + + /* Enable the configuration access */ + if (!PCI_BUS(bdf)) + writel(SEND_ENABLE | TYPE0, priv->regs + PCIECCTLR); + else + writel(SEND_ENABLE | TYPE1, priv->regs + PCIECCTLR); + + /* Check for errors */ + if (readl(priv->regs + PCIEERRFR) & UNSUPPORTED_REQUEST) + return -ENODEV; + + /* Check for master and target aborts */ + if (rcar_read_conf(udev, RCONF(PCI_STATUS)) & + (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) + return -ENODEV; + + if (access_type == RCAR_PCI_ACCESS_READ) + *data = readl(priv->regs + PCIECDR); + else + writel(*data, priv->regs + PCIECDR); + + /* Disable the configuration access */ + writel(0, priv->regs + PCIECCTLR); + + return 0; +} + +static int rcar_gen3_pcie_addr_valid(pci_dev_t d, uint where) +{ + u32 slot; + + if (PCI_BUS(d)) + return -EINVAL; + + if (PCI_FUNC(d)) + return -EINVAL; + + slot = PCI_DEV(d); + if (slot > 1) + return -EINVAL; + + return 0; +} + +static int rcar_gen3_pcie_read_config(const struct udevice *dev, pci_dev_t bdf, + uint where, ulong 
*val, + enum pci_size_t size) +{ + ulong reg; + int ret; + + ret = rcar_gen3_pcie_addr_valid(bdf, where); + if (ret) { + *val = pci_get_ff(size); + return 0; + } + + ret = rcar_pcie_config_access(dev, RCAR_PCI_ACCESS_READ, + bdf, where, ®); + if (ret != 0) + reg = 0xffffffffUL; + + *val = pci_conv_32_to_size(reg, where, size); + + return ret; +} + +static int rcar_gen3_pcie_write_config(struct udevice *dev, pci_dev_t bdf, + uint where, ulong val, + enum pci_size_t size) +{ + ulong data; + int ret; + + ret = rcar_gen3_pcie_addr_valid(bdf, where); + if (ret) + return ret; + + data = pci_conv_32_to_size(val, where, size); + + ret = rcar_pcie_config_access(dev, RCAR_PCI_ACCESS_WRITE, + bdf, where, &data); + + return ret; +} + +static int rcar_gen3_pcie_wait_for_phyrdy(struct udevice *dev) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(dev); + + return wait_for_bit_le32((void *)priv->regs + PCIEPHYSR, PHYRDY, + true, 50, false); +} + +static int rcar_gen3_pcie_wait_for_dl(struct udevice *dev) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(dev); + + return wait_for_bit_le32((void *)priv->regs + PCIETSTR, + DATA_LINK_ACTIVE, true, 50, false); +} + +static int rcar_gen3_pcie_hw_init(struct udevice *dev) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(dev); + int ret; + + /* Begin initialization */ + writel(0, priv->regs + PCIETCTLR); + + /* Set mode */ + writel(1, priv->regs + PCIEMSR); + + ret = rcar_gen3_pcie_wait_for_phyrdy(dev); + if (ret) + return ret; + + /* + * Initial header for port config space is type 1, set the device + * class to match. Hardware takes care of propagating the IDSETR + * settings, so there is no need to bother with a quirk. + */ + writel(PCI_CLASS_BRIDGE_PCI_NORMAL << 8, priv->regs + IDSETR1); + + /* + * Setup Secondary Bus Number & Subordinate Bus Number, even though + * they aren't used, to avoid bridge being detected as broken. + */ + rcar_rmw32(dev, RCONF(PCI_SECONDARY_BUS), 0xff, 1); + rcar_rmw32(dev, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); + + /* Initialize default capabilities. */ + rcar_rmw32(dev, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); + rcar_rmw32(dev, REXPCAP(PCI_EXP_FLAGS), + PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); + rcar_rmw32(dev, RCONF(PCI_HEADER_TYPE), 0x7f, + PCI_HEADER_TYPE_BRIDGE); + + /* Enable data link layer active state reporting */ + rcar_rmw32(dev, REXPCAP(PCI_EXP_LNKCAP), + PCI_EXP_LNKCAP_DLLLARC, PCI_EXP_LNKCAP_DLLLARC); + + /* Write out the physical slot number = 0 */ + rcar_rmw32(dev, REXPCAP(PCI_EXP_SLTCAP), + PCI_EXP_SLTCAP_PSN, 0); + + /* Set the completion timer timeout to the maximum 50ms. 
*/ + rcar_rmw32(dev, TLCTLR + 1, 0x3f, 50); + + /* Terminate list of capabilities (Next Capability Offset=0) */ + rcar_rmw32(dev, RVCCAP(0), 0xfff00000, 0); + + /* Finish initialization - establish a PCI Express link */ + writel(CFINIT, priv->regs + PCIETCTLR); + + return rcar_gen3_pcie_wait_for_dl(dev); +} + +static int rcar_gen3_pcie_probe(struct udevice *dev) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(dev); + struct pci_controller *hose = dev_get_uclass_priv(dev); + struct clk pci_clk; + u32 mask; + int i, cnt, ret; + + ret = clk_get_by_index(dev, 0, &pci_clk); + if (ret) + return ret; + + ret = clk_enable(&pci_clk); + if (ret) + return ret; + + for (i = 0; i < hose->region_count; i++) { + if (hose->regions[i].flags != PCI_REGION_SYS_MEMORY) + continue; + + if (hose->regions[i].phys_start == 0) + continue; + + mask = (roundup_pow_of_two(hose->regions[i].size) - 1) & ~0xf; + mask |= LAR_ENABLE; + writel(rounddown_pow_of_two(hose->regions[i].phys_start), + priv->regs + PCIEPRAR(0)); + writel(rounddown_pow_of_two(hose->regions[i].phys_start), + priv->regs + PCIELAR(0)); + writel(mask, priv->regs + PCIELAMR(0)); + break; + } + + writel(0, priv->regs + PCIEPRAR(1)); + writel(0, priv->regs + PCIELAR(1)); + writel(0, priv->regs + PCIELAMR(1)); + + ret = rcar_gen3_pcie_hw_init(dev); + if (ret) + return ret; + + for (i = 0, cnt = 0; i < hose->region_count; i++) { + if (hose->regions[i].flags == PCI_REGION_SYS_MEMORY) + continue; + + writel(0, priv->regs + PCIEPTCTLR(cnt)); + writel((hose->regions[i].size - 1) & ~0x7f, + priv->regs + PCIEPAMR(cnt)); + writel(upper_32_bits(hose->regions[i].phys_start), + priv->regs + PCIEPAUR(cnt)); + writel(lower_32_bits(hose->regions[i].phys_start), + priv->regs + PCIEPALR(cnt)); + mask = PAR_ENABLE; + if (hose->regions[i].flags == PCI_REGION_IO) + mask |= IO_SPACE; + writel(mask, priv->regs + PCIEPTCTLR(cnt)); + + cnt++; + } + + return 0; +} + +static int rcar_gen3_pcie_of_to_plat(struct udevice *dev) +{ + struct rcar_gen3_pcie_priv *priv = dev_get_plat(dev); + + priv->regs = devfdt_get_addr_index(dev, 0); + if (!priv->regs) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops rcar_gen3_pcie_ops = { + .read_config = rcar_gen3_pcie_read_config, + .write_config = rcar_gen3_pcie_write_config, +}; + +static const struct udevice_id rcar_gen3_pcie_ids[] = { + { .compatible = "renesas,pcie-rcar-gen3" }, + { } +}; + +U_BOOT_DRIVER(rcar_gen3_pcie) = { + .name = "rcar_gen3_pcie", + .id = UCLASS_PCI, + .of_match = rcar_gen3_pcie_ids, + .ops = &rcar_gen3_pcie_ops, + .probe = rcar_gen3_pcie_probe, + .of_to_plat = rcar_gen3_pcie_of_to_plat, + .plat_auto = sizeof(struct rcar_gen3_pcie_priv), +}; diff --git a/drivers/pci/pci-uclass.c b/drivers/pci/pci-uclass.c new file mode 100644 index 00000000000..6571e653049 --- /dev/null +++ b/drivers/pci/pci-uclass.c @@ -0,0 +1,1923 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2014 Google, Inc + * Written by Simon Glass <sjg@chromium.org> + */ + +#define LOG_CATEGORY UCLASS_PCI + +#include <dm.h> +#include <errno.h> +#include <init.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <spl.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <dm/device-internal.h> +#include <dm/lists.h> +#include <dm/uclass-internal.h> +#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) +#include <asm/fsp/fsp_support.h> +#endif +#include <dt-bindings/pci/pci.h> +#include <linux/delay.h> +#include <linux/printk.h> +#include "pci_internal.h" + +DECLARE_GLOBAL_DATA_PTR; + +int 
pci_get_bus(int busnum, struct udevice **busp) +{ + int ret; + + ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); + + /* Since buses may not be numbered yet try a little harder with bus 0 */ + if (ret == -ENODEV) { + ret = uclass_first_device_err(UCLASS_PCI, busp); + if (ret) + return ret; + ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); + } + + return ret; +} + +struct udevice *pci_get_controller(struct udevice *dev) +{ + while (device_is_on_pci_bus(dev)) + dev = dev->parent; + + return dev; +} + +pci_dev_t dm_pci_get_bdf(const struct udevice *dev) +{ + struct pci_child_plat *pplat = dev_get_parent_plat(dev); + struct udevice *bus = dev->parent; + + /* + * This error indicates that @dev is a device on an unprobed PCI bus. + * The bus likely has bus=seq == -1, so the PCI_ADD_BUS() macro below + * will produce a bad BDF> + * + * A common cause of this problem is that this function is called in the + * of_to_plat() method of @dev. Accessing the PCI bus in that + * method is not allowed, since it has not yet been probed. To fix this, + * move that access to the probe() method of @dev instead. + */ + if (!device_active(bus)) + log_err("PCI: Device '%s' on unprobed bus '%s'\n", dev->name, + bus->name); + return PCI_ADD_BUS(dev_seq(bus), pplat->devfn); +} + +/** + * pci_get_bus_max() - returns the bus number of the last active bus + * + * Return: last bus number, or -1 if no active buses + */ +static int pci_get_bus_max(void) +{ + struct udevice *bus; + struct uclass *uc; + int ret = -1; + + ret = uclass_get(UCLASS_PCI, &uc); + uclass_foreach_dev(bus, uc) { + if (dev_seq(bus) > ret) + ret = dev_seq(bus); + } + + debug("%s: ret=%d\n", __func__, ret); + + return ret; +} + +int pci_last_busno(void) +{ + return pci_get_bus_max(); +} + +int pci_get_ff(enum pci_size_t size) +{ + switch (size) { + case PCI_SIZE_8: + return 0xff; + case PCI_SIZE_16: + return 0xffff; + default: + return 0xffffffff; + } +} + +static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf, + ofnode *rnode) +{ + struct fdt_pci_addr addr; + ofnode node; + int ret; + + dev_for_each_subnode(node, bus) { + ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg", + &addr, NULL); + if (ret) + continue; + + if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf)) + continue; + + *rnode = node; + break; + } +}; + +int pci_bus_find_devfn(const struct udevice *bus, pci_dev_t find_devfn, + struct udevice **devp) +{ + struct udevice *dev; + + for (device_find_first_child(bus, &dev); + dev; + device_find_next_child(&dev)) { + struct pci_child_plat *pplat; + + pplat = dev_get_parent_plat(dev); + if (pplat && pplat->devfn == find_devfn) { + *devp = dev; + return 0; + } + } + + return -ENODEV; +} + +int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp) +{ + struct udevice *bus; + int ret; + + ret = pci_get_bus(PCI_BUS(bdf), &bus); + if (ret) + return ret; + return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp); +} + +static int pci_device_matches_ids(struct udevice *dev, + const struct pci_device_id *ids) +{ + struct pci_child_plat *pplat; + int i; + + pplat = dev_get_parent_plat(dev); + if (!pplat) + return -EINVAL; + for (i = 0; ids[i].vendor != 0; i++) { + if (pplat->vendor == ids[i].vendor && + pplat->device == ids[i].device) + return i; + } + + return -EINVAL; +} + +int pci_bus_find_devices(struct udevice *bus, const struct pci_device_id *ids, + int *indexp, struct udevice **devp) +{ + struct udevice *dev; + + /* Scan all devices on this bus */ + for (device_find_first_child(bus, &dev); + dev; + 
device_find_next_child(&dev)) { + if (pci_device_matches_ids(dev, ids) >= 0) { + if ((*indexp)-- <= 0) { + *devp = dev; + return 0; + } + } + } + + return -ENODEV; +} + +int pci_find_device_id(const struct pci_device_id *ids, int index, + struct udevice **devp) +{ + struct udevice *bus; + + /* Scan all known buses */ + for (uclass_first_device(UCLASS_PCI, &bus); + bus; + uclass_next_device(&bus)) { + if (!pci_bus_find_devices(bus, ids, &index, devp)) + return 0; + } + *devp = NULL; + + return -ENODEV; +} + +static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor, + unsigned int device, int *indexp, + struct udevice **devp) +{ + struct pci_child_plat *pplat; + struct udevice *dev; + + for (device_find_first_child(bus, &dev); + dev; + device_find_next_child(&dev)) { + pplat = dev_get_parent_plat(dev); + if (pplat->vendor == vendor && pplat->device == device) { + if (!(*indexp)--) { + *devp = dev; + return 0; + } + } + } + + return -ENODEV; +} + +int dm_pci_find_device(unsigned int vendor, unsigned int device, int index, + struct udevice **devp) +{ + struct udevice *bus; + + /* Scan all known buses */ + for (uclass_first_device(UCLASS_PCI, &bus); + bus; + uclass_next_device(&bus)) { + if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp)) + return device_probe(*devp); + } + *devp = NULL; + + return -ENODEV; +} + +int dm_pci_find_class(uint find_class, int index, struct udevice **devp) +{ + struct udevice *dev; + + /* Scan all known buses */ + for (pci_find_first_device(&dev); + dev; + pci_find_next_device(&dev)) { + struct pci_child_plat *pplat = dev_get_parent_plat(dev); + + if (pplat->class == find_class && !index--) { + *devp = dev; + return device_probe(*devp); + } + } + *devp = NULL; + + return -ENODEV; +} + +int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset, + unsigned long value, enum pci_size_t size) +{ + struct dm_pci_ops *ops; + + ops = pci_get_ops(bus); + if (!ops->write_config) + return -ENOSYS; + if (offset < 0 || offset >= 4096) + return -EINVAL; + return ops->write_config(bus, bdf, offset, value, size); +} + +int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset, + u32 clr, u32 set) +{ + ulong val; + int ret; + + ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32); + if (ret) + return ret; + val &= ~clr; + val |= set; + + return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32); +} + +static int pci_write_config(pci_dev_t bdf, int offset, unsigned long value, + enum pci_size_t size) +{ + struct udevice *bus; + int ret; + + ret = pci_get_bus(PCI_BUS(bdf), &bus); + if (ret) + return ret; + + return pci_bus_write_config(bus, bdf, offset, value, size); +} + +int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value, + enum pci_size_t size) +{ + struct udevice *bus; + + for (bus = dev; device_is_on_pci_bus(bus);) + bus = bus->parent; + return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value, + size); +} + +int pci_write_config32(pci_dev_t bdf, int offset, u32 value) +{ + return pci_write_config(bdf, offset, value, PCI_SIZE_32); +} + +int pci_write_config16(pci_dev_t bdf, int offset, u16 value) +{ + return pci_write_config(bdf, offset, value, PCI_SIZE_16); +} + +int pci_write_config8(pci_dev_t bdf, int offset, u8 value) +{ + return pci_write_config(bdf, offset, value, PCI_SIZE_8); +} + +int dm_pci_write_config8(struct udevice *dev, int offset, u8 value) +{ + return dm_pci_write_config(dev, offset, value, PCI_SIZE_8); +} + +int dm_pci_write_config16(struct udevice 
*dev, int offset, u16 value) +{ + return dm_pci_write_config(dev, offset, value, PCI_SIZE_16); +} + +int dm_pci_write_config32(struct udevice *dev, int offset, u32 value) +{ + return dm_pci_write_config(dev, offset, value, PCI_SIZE_32); +} + +int pci_bus_read_config(const struct udevice *bus, pci_dev_t bdf, int offset, + unsigned long *valuep, enum pci_size_t size) +{ + struct dm_pci_ops *ops; + + ops = pci_get_ops(bus); + if (!ops->read_config) { + *valuep = pci_conv_32_to_size(~0, offset, size); + return -ENOSYS; + } + if (offset < 0 || offset >= 4096) { + *valuep = pci_conv_32_to_size(0, offset, size); + return -EINVAL; + } + return ops->read_config(bus, bdf, offset, valuep, size); +} + +static int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep, + enum pci_size_t size) +{ + struct udevice *bus; + int ret; + + ret = pci_get_bus(PCI_BUS(bdf), &bus); + if (ret) + return ret; + + return pci_bus_read_config(bus, bdf, offset, valuep, size); +} + +int dm_pci_read_config(const struct udevice *dev, int offset, + unsigned long *valuep, enum pci_size_t size) +{ + const struct udevice *bus; + + for (bus = dev; device_is_on_pci_bus(bus);) + bus = bus->parent; + return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep, + size); +} + +int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep) +{ + unsigned long value; + int ret; + + ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32); + if (ret) + return ret; + *valuep = value; + + return 0; +} + +int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep) +{ + unsigned long value; + int ret; + + ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16); + if (ret) + return ret; + *valuep = value; + + return 0; +} + +int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep) +{ + unsigned long value; + int ret; + + ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8); + if (ret) + return ret; + *valuep = value; + + return 0; +} + +int dm_pci_read_config8(const struct udevice *dev, int offset, u8 *valuep) +{ + unsigned long value; + int ret; + + ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8); + if (ret) + return ret; + *valuep = value; + + return 0; +} + +int dm_pci_read_config16(const struct udevice *dev, int offset, u16 *valuep) +{ + unsigned long value; + int ret; + + ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16); + if (ret) + return ret; + *valuep = value; + + return 0; +} + +int dm_pci_read_config32(const struct udevice *dev, int offset, u32 *valuep) +{ + unsigned long value; + int ret; + + ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32); + if (ret) + return ret; + *valuep = value; + + return 0; +} + +int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set) +{ + u8 val; + int ret; + + ret = dm_pci_read_config8(dev, offset, &val); + if (ret) + return ret; + val &= ~clr; + val |= set; + + return dm_pci_write_config8(dev, offset, val); +} + +int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set) +{ + u16 val; + int ret; + + ret = dm_pci_read_config16(dev, offset, &val); + if (ret) + return ret; + val &= ~clr; + val |= set; + + return dm_pci_write_config16(dev, offset, val); +} + +int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set) +{ + u32 val; + int ret; + + ret = dm_pci_read_config32(dev, offset, &val); + if (ret) + return ret; + val &= ~clr; + val |= set; + + return dm_pci_write_config32(dev, offset, val); +} + +static void set_vga_bridge_bits(struct udevice *dev) +{ + struct udevice *parent = dev->parent; 
+ u16 bc; + + while (dev_seq(parent) != 0) { + dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc); + bc |= PCI_BRIDGE_CTL_VGA; + dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc); + parent = parent->parent; + } +} + +int pci_auto_config_devices(struct udevice *bus) +{ + struct pci_controller *hose = dev_get_uclass_priv(bus); + struct pci_child_plat *pplat; + unsigned int sub_bus; + struct udevice *dev; + + sub_bus = dev_seq(bus); + debug("%s: start\n", __func__); + pciauto_config_init(hose); + for (device_find_first_child(bus, &dev); + dev; + device_find_next_child(&dev)) { + unsigned int max_bus; + int ret; + + debug("%s: device %s\n", __func__, dev->name); + if (dev_has_ofnode(dev) && + dev_read_bool(dev, "pci,no-autoconfig")) + continue; + ret = dm_pciauto_config_device(dev); + if (ret < 0) + return log_msg_ret("auto", ret); + max_bus = ret; + sub_bus = max(sub_bus, max_bus); + + if (dev_get_parent(dev) == bus) + continue; + + pplat = dev_get_parent_plat(dev); + if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8)) + set_vga_bridge_bits(dev); + } + if (hose->last_busno < sub_bus) + hose->last_busno = sub_bus; + debug("%s: done\n", __func__); + + return log_msg_ret("sub", sub_bus); +} + +int pci_generic_mmap_write_config( + const struct udevice *bus, + int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset, + void **addrp), + pci_dev_t bdf, + uint offset, + ulong value, + enum pci_size_t size) +{ + void *address; + + if (addr_f(bus, bdf, offset, &address) < 0) + return 0; + + switch (size) { + case PCI_SIZE_8: + writeb(value, address); + return 0; + case PCI_SIZE_16: + writew(value, address); + return 0; + case PCI_SIZE_32: + writel(value, address); + return 0; + default: + return -EINVAL; + } +} + +int pci_generic_mmap_read_config( + const struct udevice *bus, + int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset, + void **addrp), + pci_dev_t bdf, + uint offset, + ulong *valuep, + enum pci_size_t size) +{ + void *address; + + if (addr_f(bus, bdf, offset, &address) < 0) { + *valuep = pci_get_ff(size); + return 0; + } + + switch (size) { + case PCI_SIZE_8: + *valuep = readb(address); + return 0; + case PCI_SIZE_16: + *valuep = readw(address); + return 0; + case PCI_SIZE_32: + *valuep = readl(address); + return 0; + default: + return -EINVAL; + } +} + +int dm_pci_hose_probe_bus(struct udevice *bus) +{ + u8 header_type; + int sub_bus; + int ret; + int ea_pos; + u8 reg; + + debug("%s\n", __func__); + + dm_pci_read_config8(bus, PCI_HEADER_TYPE, &header_type); + header_type &= 0x7f; + if (header_type != PCI_HEADER_TYPE_BRIDGE) { + debug("%s: Skipping PCI device %d with Non-Bridge Header Type 0x%x\n", + __func__, PCI_DEV(dm_pci_get_bdf(bus)), header_type); + return log_msg_ret("probe", -EINVAL); + } + + if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION)) + ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA); + else + ea_pos = 0; + + if (ea_pos) { + dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8), + ®); + sub_bus = reg; + } else { + sub_bus = pci_get_bus_max() + 1; + } + debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name); + dm_pciauto_prescan_setup_bridge(bus, sub_bus); + + ret = device_probe(bus); + if (ret) { + debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name, + ret); + return log_msg_ret("probe", ret); + } + + if (!ea_pos) + sub_bus = pci_get_bus_max(); + + dm_pciauto_postscan_setup_bridge(bus, sub_bus); + + return sub_bus; +} + +/** + * pci_match_one_device - Tell if a PCI device structure has a matching + * PCI device id structure + * 
@id: single PCI device id structure to match + * @find: the PCI device id structure to match against + * + * Returns true if the finding pci_device_id structure matched or false if + * there is no match. + */ +static bool pci_match_one_id(const struct pci_device_id *id, + const struct pci_device_id *find) +{ + if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) && + (id->device == PCI_ANY_ID || id->device == find->device) && + (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) && + (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) && + !((id->class ^ find->class) & id->class_mask)) + return true; + + return false; +} + +/** + * pci_need_device_pre_reloc() - Check if a device should be bound + * + * This checks a list of vendor/device-ID values indicating devices that should + * be bound before relocation. + * + * @bus: Bus to check + * @vendor: Vendor ID to check + * @device: Device ID to check + * Return: true if the vendor/device is in the list, false if not + */ +static bool pci_need_device_pre_reloc(struct udevice *bus, uint vendor, + uint device) +{ + u32 vendev; + int index; + + if (spl_phase() == PHASE_SPL && CONFIG_IS_ENABLED(PCI_PNP)) + return true; + + for (index = 0; + !dev_read_u32_index(bus, "u-boot,pci-pre-reloc", index, + &vendev); + index++) { + if (vendev == PCI_VENDEV(vendor, device)) + return true; + } + + return false; +} + +/** + * pci_find_and_bind_driver() - Find and bind the right PCI driver + * + * This only looks at certain fields in the descriptor. + * + * @parent: Parent bus + * @find_id: Specification of the driver to find + * @bdf: Bus/device/function addreess - see PCI_BDF() + * @devp: Returns a pointer to the device created + * Return: 0 if OK, -EPERM if the device is not needed before relocation and + * therefore was not created, other -ve value on error + */ +static int pci_find_and_bind_driver(struct udevice *parent, + struct pci_device_id *find_id, + pci_dev_t bdf, struct udevice **devp) +{ + struct pci_driver_entry *start, *entry; + ofnode node = ofnode_null(); + const char *drv; + int n_ents; + int ret; + char name[30], *str; + bool bridge; + + *devp = NULL; + + debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__, + find_id->vendor, find_id->device); + + /* Determine optional OF node */ + if (ofnode_valid(dev_ofnode(parent))) + pci_dev_find_ofnode(parent, bdf, &node); + + if (ofnode_valid(node) && !ofnode_is_enabled(node)) { + debug("%s: Ignoring disabled device\n", __func__); + return log_msg_ret("dis", -EPERM); + } + + start = ll_entry_start(struct pci_driver_entry, pci_driver_entry); + n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry); + for (entry = start; entry != start + n_ents; entry++) { + const struct pci_device_id *id; + struct udevice *dev; + const struct driver *drv; + + for (id = entry->match; + id->vendor || id->subvendor || id->class_mask; + id++) { + if (!pci_match_one_id(id, find_id)) + continue; + + drv = entry->driver; + + /* + * In the pre-relocation phase, we only bind devices + * whose driver has the DM_FLAG_PRE_RELOC set, to save + * precious memory space as on some platforms as that + * space is pretty limited (ie: using Cache As RAM). 
+ */ + if (!(gd->flags & GD_FLG_RELOC) && + !(drv->flags & DM_FLAG_PRE_RELOC) && + (!CONFIG_IS_ENABLED(PCI_PNP) || + spl_phase() != PHASE_SPL)) + return log_msg_ret("pre", -EPERM); + + /* + * We could pass the descriptor to the driver as + * plat (instead of NULL) and allow its bind() + * method to return -ENOENT if it doesn't support this + * device. That way we could continue the search to + * find another driver. For now this doesn't seem + * necesssary, so just bind the first match. + */ + ret = device_bind(parent, drv, drv->name, NULL, node, + &dev); + if (ret) + goto error; + debug("%s: Match found: %s\n", __func__, drv->name); + dev->driver_data = id->driver_data; + *devp = dev; + return 0; + } + } + + bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI; + /* + * In the pre-relocation phase, we only bind bridge devices to save + * precious memory space as on some platforms as that space is pretty + * limited (ie: using Cache As RAM). + */ + if (!(gd->flags & GD_FLG_RELOC) && !bridge && + !pci_need_device_pre_reloc(parent, find_id->vendor, + find_id->device)) + return log_msg_ret("notbr", -EPERM); + + /* Bind a generic driver so that the device can be used */ + sprintf(name, "pci_%x:%x.%x", dev_seq(parent), PCI_DEV(bdf), + PCI_FUNC(bdf)); + str = strdup(name); + if (!str) + return -ENOMEM; + drv = bridge ? "pci_bridge_drv" : "pci_generic_drv"; + + ret = device_bind_driver_to_node(parent, drv, str, node, devp); + if (ret) { + debug("%s: Failed to bind generic driver: %d\n", __func__, ret); + free(str); + return ret; + } + debug("%s: No match found: bound generic driver instead\n", __func__); + + return 0; + +error: + debug("%s: No match found: error %d\n", __func__, ret); + return ret; +} + +__weak extern void board_pci_fixup_dev(struct udevice *bus, struct udevice *dev) +{ +} + +int pci_bind_bus_devices(struct udevice *bus) +{ + ulong vendor, device; + ulong header_type; + pci_dev_t bdf, end; + bool found_multi; + int ari_off; + int ret; + + found_multi = false; + end = PCI_BDF(dev_seq(bus), PCI_MAX_PCI_DEVICES - 1, + PCI_MAX_PCI_FUNCTIONS - 1); + for (bdf = PCI_BDF(dev_seq(bus), 0, 0); bdf <= end; + bdf += PCI_BDF(0, 0, 1)) { + struct pci_child_plat *pplat; + struct udevice *dev; + ulong class; + + if (!PCI_FUNC(bdf)) + found_multi = false; + if (PCI_FUNC(bdf) && !found_multi) + continue; + + /* Check only the first access, we don't expect problems */ + ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor, + PCI_SIZE_16); + if (ret || vendor == 0xffff || vendor == 0x0000) + continue; + + pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE, + &header_type, PCI_SIZE_8); + + if (!PCI_FUNC(bdf)) + found_multi = header_type & 0x80; + + debug("%s: bus %d/%s: found device %x, function %d", __func__, + dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); + pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device, + PCI_SIZE_16); + pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class, + PCI_SIZE_32); + class >>= 8; + + /* Find this device in the device tree */ + ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); + debug(": find ret=%d\n", ret); + + /* If nothing in the device tree, bind a device */ + if (ret == -ENODEV) { + struct pci_device_id find_id; + ulong val; + + memset(&find_id, '\0', sizeof(find_id)); + find_id.vendor = vendor; + find_id.device = device; + find_id.class = class; + if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) { + pci_bus_read_config(bus, bdf, + PCI_SUBSYSTEM_VENDOR_ID, + &val, PCI_SIZE_32); + find_id.subvendor = val & 0xffff; + find_id.subdevice = 
val >> 16; + } + ret = pci_find_and_bind_driver(bus, &find_id, bdf, + &dev); + } else { + debug("device: %s\n", dev->name); + } + if (ret == -EPERM) + continue; + else if (ret) + return ret; + + /* Update the platform data */ + pplat = dev_get_parent_plat(dev); + pplat->devfn = PCI_MASK_BUS(bdf); + pplat->vendor = vendor; + pplat->device = device; + pplat->class = class; + + if (IS_ENABLED(CONFIG_PCI_ARID)) { + ari_off = dm_pci_find_ext_capability(dev, + PCI_EXT_CAP_ID_ARI); + if (ari_off) { + u16 ari_cap; + + /* + * Read Next Function number in ARI Cap + * Register + */ + dm_pci_read_config16(dev, ari_off + 4, + &ari_cap); + /* + * Update next scan on this function number, + * subtract 1 in BDF to satisfy loop increment. + */ + if (ari_cap & 0xff00) { + bdf = PCI_BDF(PCI_BUS(bdf), + PCI_DEV(ari_cap), + PCI_FUNC(ari_cap)); + bdf = bdf - 0x100; + } + } + } + + board_pci_fixup_dev(bus, dev); + } + + return 0; +} + +static int decode_regions(struct pci_controller *hose, ofnode parent_node, + ofnode node) +{ + int pci_addr_cells, addr_cells, size_cells; + int cells_per_record; + struct bd_info *bd; + const u32 *prop; + int max_regions; + int len; + int i; + + /* handle booting from coreboot, etc. */ + if (!ll_boot_init()) + return 0; + + prop = ofnode_get_property(node, "ranges", &len); + if (!prop) { + debug("%s: Cannot decode regions\n", __func__); + return -EINVAL; + } + + pci_addr_cells = ofnode_read_simple_addr_cells(node); + addr_cells = ofnode_read_simple_addr_cells(parent_node); + size_cells = ofnode_read_simple_size_cells(node); + + /* PCI addresses are always 3-cells */ + len /= sizeof(u32); + cells_per_record = pci_addr_cells + addr_cells + size_cells; + hose->region_count = 0; + debug("%s: len=%d, cells_per_record=%d\n", __func__, len, + cells_per_record); + + /* Dynamically allocate the regions array */ + max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS; + hose->regions = (struct pci_region *) + calloc(1, max_regions * sizeof(struct pci_region)); + if (!hose->regions) + return -ENOMEM; + + for (i = 0; i < max_regions; i++, len -= cells_per_record) { + u64 pci_addr, addr, size; + int space_code; + u32 flags; + int type; + int pos; + + if (len < cells_per_record) + break; + flags = fdt32_to_cpu(prop[0]); + space_code = (flags >> 24) & 3; + pci_addr = fdtdec_get_number(prop + 1, 2); + prop += pci_addr_cells; + addr = fdtdec_get_number(prop, addr_cells); + prop += addr_cells; + size = fdtdec_get_number(prop, size_cells); + prop += size_cells; + debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n", + __func__, hose->region_count, pci_addr, addr, size, space_code); + if (space_code & 2) { + type = flags & (1U << 30) ? 
PCI_REGION_PREFETCH : + PCI_REGION_MEM; + } else if (space_code & 1) { + type = PCI_REGION_IO; + } else { + continue; + } + + if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) && + type == PCI_REGION_MEM && upper_32_bits(pci_addr)) { + debug(" - pci_addr beyond the 32-bit boundary, ignoring\n"); + continue; + } + + if (!IS_ENABLED(CONFIG_PHYS_64BIT) && upper_32_bits(addr)) { + debug(" - addr beyond the 32-bit boundary, ignoring\n"); + continue; + } + + if (~((pci_addr_t)0) - pci_addr < size) { + debug(" - PCI range exceeds max address, ignoring\n"); + continue; + } + + if (~((phys_addr_t)0) - addr < size) { + debug(" - phys range exceeds max address, ignoring\n"); + continue; + } + + pos = -1; + if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) { + for (i = 0; i < hose->region_count; i++) { + if (hose->regions[i].flags == type) + pos = i; + } + } + + if (pos == -1) + pos = hose->region_count++; + debug(" - type=%d, pos=%d\n", type, pos); + pci_set_region(hose->regions + pos, pci_addr, addr, size, type); + } + + /* Add a region for our local memory */ + bd = gd->bd; + if (!bd) + return 0; + + for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) { + if (bd->bi_dram[i].size) { + phys_addr_t start = bd->bi_dram[i].start; + + if (IS_ENABLED(CONFIG_PCI_MAP_SYSTEM_MEMORY)) + start = virt_to_phys((void *)(uintptr_t)bd->bi_dram[i].start); + + pci_set_region(hose->regions + hose->region_count++, + start, start, bd->bi_dram[i].size, + PCI_REGION_MEM | PCI_REGION_SYS_MEMORY); + } + } + + return 0; +} + +static int pci_uclass_pre_probe(struct udevice *bus) +{ + struct pci_controller *hose; + struct uclass *uc; + int ret; + + debug("%s, bus=%d/%s, parent=%s\n", __func__, dev_seq(bus), bus->name, + bus->parent->name); + hose = dev_get_uclass_priv(bus); + + /* + * Set the sequence number, if device_bind() doesn't. We want control + * of this so that numbers are allocated as devices are probed. That + * ensures that sub-bus numbered is correct (sub-buses must get numbers + * higher than their parents) + */ + if (dev_seq(bus) == -1) { + ret = uclass_get(UCLASS_PCI, &uc); + if (ret) + return ret; + bus->seq_ = uclass_find_next_free_seq(uc); + } + + /* For bridges, use the top-level PCI controller */ + if (!device_is_on_pci_bus(bus)) { + hose->ctlr = bus; + ret = decode_regions(hose, dev_ofnode(bus->parent), + dev_ofnode(bus)); + if (ret) + return ret; + } else { + struct pci_controller *parent_hose; + + parent_hose = dev_get_uclass_priv(bus->parent); + hose->ctlr = parent_hose->bus; + } + + hose->bus = bus; + hose->first_busno = dev_seq(bus); + hose->last_busno = dev_seq(bus); + if (dev_has_ofnode(bus)) { + hose->skip_auto_config_until_reloc = + dev_read_bool(bus, + "u-boot,skip-auto-config-until-reloc"); + } + + return 0; +} + +static int pci_uclass_post_probe(struct udevice *bus) +{ + struct pci_controller *hose = dev_get_uclass_priv(bus); + int ret; + + debug("%s: probing bus %d\n", __func__, dev_seq(bus)); + ret = pci_bind_bus_devices(bus); + if (ret) + return log_msg_ret("bind", ret); + + if (CONFIG_IS_ENABLED(PCI_PNP) && ll_boot_init() && + (!hose->skip_auto_config_until_reloc || + (gd->flags & GD_FLG_RELOC))) { + ret = pci_auto_config_devices(bus); + if (ret < 0) + return log_msg_ret("cfg", ret); + } + +#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) + /* + * Per Intel FSP specification, we should call FSP notify API to + * inform FSP that PCI enumeration has been done so that FSP will + * do any necessary initialization as required by the chipset's + * BIOS Writer's Guide (BWG). 
+ * + * Unfortunately we have to put this call here as with driver model, + * the enumeration is all done on a lazy basis as needed, so until + * something is touched on PCI it won't happen. + * + * Note we only call this 1) after U-Boot is relocated, and 2) + * root bus has finished probing. + */ + if ((gd->flags & GD_FLG_RELOC) && dev_seq(bus) == 0 && ll_boot_init()) { + ret = fsp_init_phase_pci(); + if (ret) + return log_msg_ret("fsp", ret); + } +#endif + + return 0; +} + +static int pci_uclass_child_post_bind(struct udevice *dev) +{ + struct pci_child_plat *pplat; + + if (!dev_has_ofnode(dev)) + return 0; + + pplat = dev_get_parent_plat(dev); + + /* Extract vendor id and device id if available */ + ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device); + + /* Extract the devfn from fdt_pci_addr */ + pplat->devfn = pci_get_devfn(dev); + + return 0; +} + +static int pci_bridge_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct pci_controller *hose = dev_get_uclass_priv(bus); + + return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size); +} + +static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct pci_controller *hose = dev_get_uclass_priv(bus); + + return pci_bus_write_config(hose->ctlr, bdf, offset, value, size); +} + +static int skip_to_next_device(struct udevice *bus, struct udevice **devp) +{ + struct udevice *dev; + + /* + * Scan through all the PCI controllers. On x86 there will only be one + * but that is not necessarily true on other hardware. + */ + while (bus) { + device_find_first_child(bus, &dev); + if (dev) { + *devp = dev; + return 0; + } + uclass_next_device(&bus); + } + + return 0; +} + +int pci_find_next_device(struct udevice **devp) +{ + struct udevice *child = *devp; + struct udevice *bus = child->parent; + + /* First try all the siblings */ + *devp = NULL; + while (child) { + device_find_next_child(&child); + if (child) { + *devp = child; + return 0; + } + } + + /* We ran out of siblings. Try the next bus */ + uclass_next_device(&bus); + + return bus ? 
skip_to_next_device(bus, devp) : 0; +} + +int pci_find_first_device(struct udevice **devp) +{ + struct udevice *bus; + + *devp = NULL; + uclass_first_device(UCLASS_PCI, &bus); + + return skip_to_next_device(bus, devp); +} + +ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size) +{ + switch (size) { + case PCI_SIZE_8: + return (value >> ((offset & 3) * 8)) & 0xff; + case PCI_SIZE_16: + return (value >> ((offset & 2) * 8)) & 0xffff; + default: + return value; + } +} + +ulong pci_conv_size_to_32(ulong old, ulong value, uint offset, + enum pci_size_t size) +{ + uint off_mask; + uint val_mask, shift; + ulong ldata, mask; + + switch (size) { + case PCI_SIZE_8: + off_mask = 3; + val_mask = 0xff; + break; + case PCI_SIZE_16: + off_mask = 2; + val_mask = 0xffff; + break; + default: + return value; + } + shift = (offset & off_mask) * 8; + ldata = (value & val_mask) << shift; + mask = val_mask << shift; + value = (old & ~mask) | ldata; + + return value; +} + +int pci_get_dma_regions(struct udevice *dev, struct pci_region *memp, int index) +{ + int pci_addr_cells, addr_cells, size_cells; + int cells_per_record; + const u32 *prop; + int len; + int i = 0; + + prop = ofnode_get_property(dev_ofnode(dev), "dma-ranges", &len); + if (!prop) { + log_err("PCI: Device '%s': Cannot decode dma-ranges\n", + dev->name); + return -EINVAL; + } + + pci_addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev)); + addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev->parent)); + size_cells = ofnode_read_simple_size_cells(dev_ofnode(dev)); + + /* PCI addresses are always 3-cells */ + len /= sizeof(u32); + cells_per_record = pci_addr_cells + addr_cells + size_cells; + debug("%s: len=%d, cells_per_record=%d\n", __func__, len, + cells_per_record); + + while (len) { + memp->bus_start = fdtdec_get_number(prop + 1, 2); + prop += pci_addr_cells; + memp->phys_start = fdtdec_get_number(prop, addr_cells); + prop += addr_cells; + memp->size = fdtdec_get_number(prop, size_cells); + prop += size_cells; + + if (i == index) + return 0; + i++; + len -= cells_per_record; + } + + return -EINVAL; +} + +int pci_get_regions(struct udevice *dev, struct pci_region **iop, + struct pci_region **memp, struct pci_region **prefp) +{ + struct udevice *bus = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(bus); + int i; + + *iop = NULL; + *memp = NULL; + *prefp = NULL; + for (i = 0; i < hose->region_count; i++) { + switch (hose->regions[i].flags) { + case PCI_REGION_IO: + if (!*iop || (*iop)->size < hose->regions[i].size) + *iop = hose->regions + i; + break; + case PCI_REGION_MEM: + if (!*memp || (*memp)->size < hose->regions[i].size) + *memp = hose->regions + i; + break; + case (PCI_REGION_MEM | PCI_REGION_PREFETCH): + if (!*prefp || (*prefp)->size < hose->regions[i].size) + *prefp = hose->regions + i; + break; + } + } + + return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL); +} + +u32 dm_pci_read_bar32(const struct udevice *dev, int barnum) +{ + u32 addr; + int bar; + + bar = PCI_BASE_ADDRESS_0 + barnum * 4; + dm_pci_read_config32(dev, bar, &addr); + + /* + * If we get an invalid address, return this so that comparisons with + * FDT_ADDR_T_NONE work correctly + */ + if (addr == 0xffffffff) + return addr; + else if (addr & PCI_BASE_ADDRESS_SPACE_IO) + return addr & PCI_BASE_ADDRESS_IO_MASK; + else + return addr & PCI_BASE_ADDRESS_MEM_MASK; +} + +void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr) +{ + int bar; + + bar = PCI_BASE_ADDRESS_0 + barnum * 4; + 
dm_pci_write_config32(dev, bar, addr); +} + +phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr, + size_t len, unsigned long mask, + unsigned long flags) +{ + struct udevice *ctlr; + struct pci_controller *hose; + struct pci_region *res; + pci_addr_t offset; + int i; + + /* The root controller has the region information */ + ctlr = pci_get_controller(dev); + hose = dev_get_uclass_priv(ctlr); + + if (hose->region_count == 0) + return bus_addr; + + for (i = 0; i < hose->region_count; i++) { + res = &hose->regions[i]; + + if ((res->flags & mask) != flags) + continue; + + if (bus_addr < res->bus_start) + continue; + + offset = bus_addr - res->bus_start; + if (offset >= res->size) + continue; + + if (len > res->size - offset) + continue; + + return res->phys_start + offset; + } + + puts("dm_pci_bus_to_phys: invalid physical address\n"); + return 0; +} + +pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, + size_t len, unsigned long mask, + unsigned long flags) +{ + struct udevice *ctlr; + struct pci_controller *hose; + struct pci_region *res; + phys_addr_t offset; + int i; + + /* The root controller has the region information */ + ctlr = pci_get_controller(dev); + hose = dev_get_uclass_priv(ctlr); + + if (hose->region_count == 0) + return phys_addr; + + for (i = 0; i < hose->region_count; i++) { + res = &hose->regions[i]; + + if ((res->flags & mask) != flags) + continue; + + if (phys_addr < res->phys_start) + continue; + + offset = phys_addr - res->phys_start; + if (offset >= res->size) + continue; + + if (len > res->size - offset) + continue; + + return res->bus_start + offset; + } + + puts("dm_pci_phys_to_bus: invalid physical address\n"); + return 0; +} + +static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off, + struct pci_child_plat *pdata) +{ + phys_addr_t addr = 0; + + /* + * In the case of a Virtual Function device using BAR + * base and size, add offset for VFn BAR(1, 2, 3...n) + */ + if (pdata->is_virtfn) { + size_t sz; + u32 ea_entry; + + /* MaxOffset, 1st DW */ + dm_pci_read_config32(dev, ea_off + 8, &ea_entry); + sz = ea_entry & PCI_EA_FIELD_MASK; + /* Fill up lower 2 bits */ + sz |= (~PCI_EA_FIELD_MASK); + + if (ea_entry & PCI_EA_IS_64) { + /* MaxOffset 2nd DW */ + dm_pci_read_config32(dev, ea_off + 16, &ea_entry); + sz |= ((u64)ea_entry) << 32; + } + + addr = (pdata->virtid - 1) * (sz + 1); + } + + return addr; +} + +static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, size_t offset, + size_t len, int ea_off, + struct pci_child_plat *pdata) +{ + int ea_cnt, i, entry_size; + int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2; + u32 ea_entry; + phys_addr_t addr; + + if (IS_ENABLED(CONFIG_PCI_SRIOV)) { + /* + * In the case of a Virtual Function device, device is + * Physical function, so pdata will point to required VF + * specific data. 
+ */ + if (pdata->is_virtfn) + bar_id += PCI_EA_BEI_VF_BAR0; + } + + /* EA capability structure header */ + dm_pci_read_config32(dev, ea_off, &ea_entry); + ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK; + ea_off += PCI_EA_FIRST_ENT; + + for (i = 0; i < ea_cnt; i++, ea_off += entry_size) { + /* Entry header */ + dm_pci_read_config32(dev, ea_off, &ea_entry); + entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2; + + if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id) + continue; + + /* Base address, 1st DW */ + dm_pci_read_config32(dev, ea_off + 4, &ea_entry); + addr = ea_entry & PCI_EA_FIELD_MASK; + if (ea_entry & PCI_EA_IS_64) { + /* Base address, 2nd DW, skip over 4B MaxOffset */ + dm_pci_read_config32(dev, ea_off + 12, &ea_entry); + addr |= ((u64)ea_entry) << 32; + } + + if (IS_ENABLED(CONFIG_PCI_SRIOV)) + addr += dm_pci_map_ea_virt(dev, ea_off, pdata); + + if (~((phys_addr_t)0) - addr < offset) + return NULL; + + /* size ignored for now */ + return map_physmem(addr + offset, len, MAP_NOCACHE); + } + + return 0; +} + +void *dm_pci_map_bar(struct udevice *dev, int bar, size_t offset, size_t len, + unsigned long mask, unsigned long flags) +{ + struct pci_child_plat *pdata = dev_get_parent_plat(dev); + struct udevice *udev = dev; + pci_addr_t pci_bus_addr; + u32 bar_response; + int ea_off; + + if (IS_ENABLED(CONFIG_PCI_SRIOV)) { + /* + * In case of Virtual Function devices, use PF udevice + * as EA capability is defined in Physical Function + */ + if (pdata->is_virtfn) + udev = pdata->pfdev; + } + + /* + * if the function supports Enhanced Allocation use that instead of + * BARs + * Incase of virtual functions, pdata will help read VF BEI + * and EA entry size. + */ + if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION)) + ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA); + else + ea_off = 0; + + if (ea_off) + return dm_pci_map_ea_bar(udev, bar, offset, len, ea_off, pdata); + + /* read BAR address */ + dm_pci_read_config32(udev, bar, &bar_response); + pci_bus_addr = (pci_addr_t)(bar_response & ~0xf); + + /* This has a lot of baked in assumptions, but essentially tries + * to mirror the behavior of BAR assignment for 64 Bit enabled + * hosts and 64 bit placeable BARs in the auto assign code. + */ +#if defined(CONFIG_SYS_PCI_64BIT) + if (bar_response & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dm_pci_read_config32(udev, bar + 4, &bar_response); + pci_bus_addr |= (pci_addr_t)bar_response << 32; + } +#endif /* CONFIG_SYS_PCI_64BIT */ + + if (~((pci_addr_t)0) - pci_bus_addr < offset) + return NULL; + + /* + * Forward the length argument to dm_pci_bus_to_virt. The length will + * be used to check that the entire address range has been declared as + * a PCI range, but a better check would be to probe for the size of + * the bar and prevent overflow more locally. 
+ */ + return dm_pci_bus_to_virt(udev, pci_bus_addr + offset, len, mask, flags, + MAP_NOCACHE); +} + +static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap) +{ + int ttl = PCI_FIND_CAP_TTL; + u8 id; + u16 ent; + + dm_pci_read_config8(dev, pos, &pos); + + while (ttl--) { + if (pos < PCI_STD_HEADER_SIZEOF) + break; + pos &= ~3; + dm_pci_read_config16(dev, pos, &ent); + + id = ent & 0xff; + if (id == 0xff) + break; + if (id == cap) + return pos; + pos = (ent >> 8); + } + + return 0; +} + +int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap) +{ + return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT, + cap); +} + +int dm_pci_find_capability(struct udevice *dev, int cap) +{ + u16 status; + u8 header_type; + u8 pos; + + dm_pci_read_config16(dev, PCI_STATUS, &status); + if (!(status & PCI_STATUS_CAP_LIST)) + return 0; + + dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type); + if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS) + pos = PCI_CB_CAPABILITY_LIST; + else + pos = PCI_CAPABILITY_LIST; + + return _dm_pci_find_next_capability(dev, pos, cap); +} + +int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap) +{ + u32 header; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; + + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (start) + pos = start; + + dm_pci_read_config32(dev, pos, &header); + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. + */ + if (header == 0) + return 0; + + while (ttl--) { + if (PCI_EXT_CAP_ID(header) == cap) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CFG_SPACE_SIZE) + break; + + dm_pci_read_config32(dev, pos, &header); + } + + return 0; +} + +int dm_pci_find_ext_capability(struct udevice *dev, int cap) +{ + return dm_pci_find_next_ext_capability(dev, 0, cap); +} + +int dm_pci_flr(struct udevice *dev) +{ + int pcie_off; + u32 cap; + + /* look for PCI Express Capability */ + pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pcie_off) + return -ENOENT; + + /* check FLR capability */ + dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap); + if (!(cap & PCI_EXP_DEVCAP_FLR)) + return -ENOENT; + + dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0, + PCI_EXP_DEVCTL_BCR_FLR); + + /* wait 100ms, per PCI spec */ + mdelay(100); + + return 0; +} + +#if defined(CONFIG_PCI_SRIOV) +int pci_sriov_init(struct udevice *pdev, int vf_en) +{ + u16 vendor, device; + struct udevice *bus; + struct udevice *dev; + pci_dev_t bdf; + u16 ctrl; + u16 num_vfs; + u16 total_vf; + u16 vf_offset; + u16 vf_stride; + int vf, ret; + int pos; + + pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + debug("Error: SRIOV capability not found\n"); + return -ENOENT; + } + + dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl); + + dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf); + if (vf_en > total_vf) + vf_en = total_vf; + dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en); + + ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; + dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl); + + dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs); + if (num_vfs > vf_en) + num_vfs = vf_en; + + dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset); + dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride); + + dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor); + dm_pci_read_config16(pdev, pos + 
PCI_SRIOV_VF_DID, &device); + + bdf = dm_pci_get_bdf(pdev); + + ret = pci_get_bus(PCI_BUS(bdf), &bus); + if (ret) + return ret; + + bdf += PCI_BDF(0, 0, vf_offset); + + for (vf = 0; vf < num_vfs; vf++) { + struct pci_child_plat *pplat; + ulong class; + + pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE, + &class, PCI_SIZE_16); + + debug("%s: bus %d/%s: found VF %x:%x\n", __func__, + dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); + + /* Find this device in the device tree */ + ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); + + if (ret == -ENODEV) { + struct pci_device_id find_id; + + memset(&find_id, '\0', sizeof(find_id)); + find_id.vendor = vendor; + find_id.device = device; + find_id.class = class; + + ret = pci_find_and_bind_driver(bus, &find_id, + bdf, &dev); + + if (ret) + return ret; + } + + /* Update the platform data */ + pplat = dev_get_parent_plat(dev); + pplat->devfn = PCI_MASK_BUS(bdf); + pplat->vendor = vendor; + pplat->device = device; + pplat->class = class; + pplat->is_virtfn = true; + pplat->pfdev = pdev; + pplat->virtid = vf * vf_stride + vf_offset; + + debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n", + __func__, dev_seq(dev), dev->name, PCI_DEV(bdf), + PCI_FUNC(bdf), vendor, device, class, pplat->virtid); + bdf += PCI_BDF(0, 0, vf_stride); + } + + return 0; +} + +int pci_sriov_get_totalvfs(struct udevice *pdev) +{ + u16 total_vf; + int pos; + + pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + debug("Error: SRIOV capability not found\n"); + return -ENOENT; + } + + dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf); + + return total_vf; +} +#endif /* SRIOV */ + +UCLASS_DRIVER(pci) = { + .id = UCLASS_PCI, + .name = "pci", + .flags = DM_UC_FLAG_SEQ_ALIAS | DM_UC_FLAG_NO_AUTO_SEQ, + .post_bind = dm_scan_fdt_dev, + .pre_probe = pci_uclass_pre_probe, + .post_probe = pci_uclass_post_probe, + .child_post_bind = pci_uclass_child_post_bind, + .per_device_auto = sizeof(struct pci_controller), + .per_child_plat_auto = sizeof(struct pci_child_plat), +}; + +static const struct dm_pci_ops pci_bridge_ops = { + .read_config = pci_bridge_read_config, + .write_config = pci_bridge_write_config, +}; + +static const struct udevice_id pci_bridge_ids[] = { + { .compatible = "pci-bridge" }, + { } +}; + +U_BOOT_DRIVER(pci_bridge_drv) = { + .name = "pci_bridge_drv", + .id = UCLASS_PCI, + .of_match = pci_bridge_ids, + .ops = &pci_bridge_ops, +}; + +UCLASS_DRIVER(pci_generic) = { + .id = UCLASS_PCI_GENERIC, + .name = "pci_generic", +}; + +static const struct udevice_id pci_generic_ids[] = { + { .compatible = "pci-generic" }, + { } +}; + +U_BOOT_DRIVER(pci_generic_drv) = { + .name = "pci_generic_drv", + .id = UCLASS_PCI_GENERIC, + .of_match = pci_generic_ids, +}; + +int pci_init(void) +{ + struct udevice *bus; + + /* + * Enumerate all known controller devices. Enumeration has the side- + * effect of probing them, so PCIe devices will be enumerated too. + */ + for (uclass_first_device_check(UCLASS_PCI, &bus); + bus; + uclass_next_device_check(&bus)) { + ; + } + + return 0; +} diff --git a/drivers/pci/pci_auto.c b/drivers/pci/pci_auto.c new file mode 100644 index 00000000000..90f81886445 --- /dev/null +++ b/drivers/pci/pci_auto.c @@ -0,0 +1,589 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCI autoconfiguration library + * + * Author: Matt Porter <mporter@mvista.com> + * + * Copyright 2000 MontaVista Software Inc. + * Copyright (c) 2021 Maciej W. 
Rozycki <macro@orcam.me.uk> + */ + +#include <config.h> +#include <dm.h> +#include <errno.h> +#include <log.h> +#include <pci.h> +#include <time.h> +#include "pci_internal.h" + +/* the user can define CFG_SYS_PCI_CACHE_LINE_SIZE to avoid problems */ +#ifndef CFG_SYS_PCI_CACHE_LINE_SIZE +#define CFG_SYS_PCI_CACHE_LINE_SIZE 8 +#endif + +static void dm_pciauto_setup_device(struct udevice *dev, + struct pci_region *mem, + struct pci_region *prefetch, + struct pci_region *io) +{ + u32 bar_response; + pci_size_t bar_size; + u16 cmdstat = 0; + int bar, bar_nr = 0; + int bars_num; + u8 header_type; + int rom_addr; + pci_addr_t bar_value; + struct pci_region *bar_res = NULL; + int found_mem64 = 0; + u16 class; + + dm_pci_read_config16(dev, PCI_COMMAND, &cmdstat); + cmdstat = (cmdstat & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | + PCI_COMMAND_MASTER; + + dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type); + header_type &= 0x7f; + + switch (header_type) { + case PCI_HEADER_TYPE_NORMAL: + bars_num = 6; + break; + case PCI_HEADER_TYPE_BRIDGE: + bars_num = 2; + break; + case PCI_HEADER_TYPE_CARDBUS: + /* CardBus header does not have any BAR */ + bars_num = 0; + break; + default: + /* Skip configuring BARs for unknown header types */ + bars_num = 0; + break; + } + + for (bar = PCI_BASE_ADDRESS_0; + bar < PCI_BASE_ADDRESS_0 + (bars_num * 4); bar += 4) { + int ret = 0; + + /* Tickle the BAR and get the response */ + dm_pci_write_config32(dev, bar, 0xffffffff); + dm_pci_read_config32(dev, bar, &bar_response); + + /* If BAR is not implemented (or invalid) go to the next BAR */ + if (!bar_response || bar_response == 0xffffffff) + continue; + + found_mem64 = 0; + + /* Check the BAR type and set our address mask */ + if (bar_response & PCI_BASE_ADDRESS_SPACE) { + bar_size = bar_response & PCI_BASE_ADDRESS_IO_MASK; + bar_size &= ~(bar_size - 1); + + bar_res = io; + + debug("PCI Autoconfig: BAR %d, I/O, size=0x%llx, ", + bar_nr, (unsigned long long)bar_size); + } else { + if ((bar_response & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == + PCI_BASE_ADDRESS_MEM_TYPE_64) { + u32 bar_response_upper; + u64 bar64; + + dm_pci_write_config32(dev, bar + 4, 0xffffffff); + dm_pci_read_config32(dev, bar + 4, + &bar_response_upper); + + bar64 = ((u64)bar_response_upper << 32) | + bar_response; + + bar_size = ~(bar64 & PCI_BASE_ADDRESS_MEM_MASK) + + 1; + found_mem64 = 1; + } else { + bar_size = (u32)(~(bar_response & + PCI_BASE_ADDRESS_MEM_MASK) + 1); + } + + if (prefetch && + (bar_response & PCI_BASE_ADDRESS_MEM_PREFETCH)) + bar_res = prefetch; + else + bar_res = mem; + + debug("PCI Autoconfig: BAR %d, %s%s, size=0x%llx, ", + bar_nr, bar_res == prefetch ? "Prf" : "Mem", + found_mem64 ? "64" : "", + (unsigned long long)bar_size); + } + + ret = pciauto_region_allocate(bar_res, bar_size, + &bar_value, found_mem64); + if (ret) + printf("PCI: Failed autoconfig bar %x\n", bar); + + if (!ret) { + /* Write it out and update our limit */ + dm_pci_write_config32(dev, bar, (u32)bar_value); + + if (found_mem64) { + bar += 4; +#ifdef CONFIG_SYS_PCI_64BIT + dm_pci_write_config32(dev, bar, + (u32)(bar_value >> 32)); +#else + /* + * If we are a 64-bit decoder then increment to + * the upper 32 bits of the bar and force it to + * locate in the lower 4GB of memory. + */ + dm_pci_write_config32(dev, bar, 0x00000000); +#endif + } + } + + cmdstat |= (bar_response & PCI_BASE_ADDRESS_SPACE) ? 
+ PCI_COMMAND_IO : PCI_COMMAND_MEMORY; + + debug("\n"); + + bar_nr++; + } + + /* Configure the expansion ROM address */ + if (header_type == PCI_HEADER_TYPE_NORMAL || + header_type == PCI_HEADER_TYPE_BRIDGE) { + rom_addr = (header_type == PCI_HEADER_TYPE_NORMAL) ? + PCI_ROM_ADDRESS : PCI_ROM_ADDRESS1; + dm_pci_write_config32(dev, rom_addr, 0xfffffffe); + dm_pci_read_config32(dev, rom_addr, &bar_response); + if (bar_response) { + bar_size = -(bar_response & ~1); + debug("PCI Autoconfig: ROM, size=%#x, ", + (unsigned int)bar_size); + if (pciauto_region_allocate(mem, bar_size, &bar_value, + false) == 0) { + dm_pci_write_config32(dev, rom_addr, bar_value); + } + cmdstat |= PCI_COMMAND_MEMORY; + debug("\n"); + } + } + + /* PCI_COMMAND_IO must be set for VGA device */ + dm_pci_read_config16(dev, PCI_CLASS_DEVICE, &class); + if (class == PCI_CLASS_DISPLAY_VGA) + cmdstat |= PCI_COMMAND_IO; + + dm_pci_write_config16(dev, PCI_COMMAND, cmdstat); + dm_pci_write_config8(dev, PCI_CACHE_LINE_SIZE, + CFG_SYS_PCI_CACHE_LINE_SIZE); + dm_pci_write_config8(dev, PCI_LATENCY_TIMER, 0x80); +} + +/* + * Check if the link of a downstream PCIe port operates correctly. + * + * For that check if the optional Data Link Layer Link Active status gets + * on within a 200ms period or failing that wait until the completion of + * that period and check if link training has shown the completed status + * continuously throughout the second half of that period. + * + * Observation with the ASMedia ASM2824 Gen 3 switch indicates it takes + * 11-44ms to indicate the Data Link Layer Link Active status at 2.5GT/s, + * though it may take a couple of link training iterations. + */ +static bool dm_pciauto_exp_link_stable(struct udevice *dev, int pcie_off) +{ + u64 loops = 0, trcount = 0, ntrcount = 0, flips = 0; + bool dllla, lnktr, plnktr; + u16 exp_lnksta; + pci_dev_t bdf; + u64 end; + + dm_pci_read_config16(dev, pcie_off + PCI_EXP_LNKSTA, &exp_lnksta); + plnktr = !!(exp_lnksta & PCI_EXP_LNKSTA_LT); + + end = get_ticks() + usec_to_tick(200000); + do { + dm_pci_read_config16(dev, pcie_off + PCI_EXP_LNKSTA, + &exp_lnksta); + dllla = !!(exp_lnksta & PCI_EXP_LNKSTA_DLLLA); + lnktr = !!(exp_lnksta & PCI_EXP_LNKSTA_LT); + + flips += plnktr ^ lnktr; + if (lnktr) { + ntrcount = 0; + trcount++; + } else { + ntrcount++; + } + loops++; + + plnktr = lnktr; + } while (!dllla && get_ticks() < end); + + bdf = dm_pci_get_bdf(dev); + debug("PCI Autoconfig: %02x.%02x.%02x: Fixup link: DL active: %u; " + "%3llu flips, %6llu loops of which %6llu while training, " + "final %6llu stable\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), + (unsigned int)dllla, + (unsigned long long)flips, (unsigned long long)loops, + (unsigned long long)trcount, (unsigned long long)ntrcount); + + return dllla || ntrcount >= loops / 2; +} + +/* + * Retrain the link of a downstream PCIe port by hand if necessary. + * + * This is needed at least where a downstream port of the ASMedia ASM2824 + * Gen 3 switch is wired to the upstream port of the Pericom PI7C9X2G304 + * Gen 2 switch, and observed with the Delock Riser Card PCI Express x1 > + * 2 x PCIe x1 device, P/N 41433, plugged into the SiFive HiFive Unmatched + * board. + * + * In such a configuration the switches are supposed to negotiate the link + * speed of preferably 5.0GT/s, falling back to 2.5GT/s. However the link + * continues switching between the two speeds indefinitely and the data + * link layer never reaches the active state, with link training reported + * repeatedly active ~84% of the time. 
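+ * (Link training activity is what dm_pciauto_exp_link_stable() above + * samples by polling the PCI_EXP_LNKSTA_LT bit.)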
Forcing the target link speed to + * 2.5GT/s with the upstream ASM2824 device makes the two switches talk to + * each other correctly however. And more interestingly retraining with a + * higher target link speed afterwards lets the two successfully negotiate + * 5.0GT/s. + * + * As this can potentially happen with any device and is cheap in the case + * of correctly operating hardware, let's do it for all downstream ports, + * for root complexes, PCIe switches and PCI/PCI-X to PCIe bridges. + * + * First check if automatic link training may have failed to complete, as + * indicated by the optional Data Link Layer Link Active status being off + * and the Link Bandwidth Management Status indicating that hardware has + * changed the link speed or width in an attempt to correct unreliable + * link operation. If this is the case, then check if the link operates + * correctly by seeing whether it is being trained excessively. If it is, + * then conclude the link is broken. + * + * In that case restrict the speed to 2.5GT/s, observing that the Target + * Link Speed field is sticky and therefore the link will stay restricted + * even after a device reset is later made by an OS that is unaware of the + * problem. With the speed restricted request that the link be retrained + * and check again if the link operates correctly. If not, then set the + * Target Link Speed back to the original value. + * + * This requires the presence of the Link Control 2 register, so make sure + * the PCI Express Capability Version is at least 2. Also don't try, for + * obvious reasons, to limit the speed if 2.5GT/s is the only link speed + * supported. + */ +static void dm_pciauto_exp_fixup_link(struct udevice *dev, int pcie_off) +{ + u16 exp_lnksta, exp_lnkctl, exp_lnkctl2; + u16 exp_flags, exp_type, exp_version; + u32 exp_lnkcap; + pci_dev_t bdf; + + dm_pci_read_config16(dev, pcie_off + PCI_EXP_FLAGS, &exp_flags); + exp_version = exp_flags & PCI_EXP_FLAGS_VERS; + if (exp_version < 2) + return; + + exp_type = (exp_flags & PCI_EXP_FLAGS_TYPE) >> 4; + switch (exp_type) { + case PCI_EXP_TYPE_ROOT_PORT: + case PCI_EXP_TYPE_DOWNSTREAM: + case PCI_EXP_TYPE_PCIE_BRIDGE: + break; + default: + return; + } + + dm_pci_read_config32(dev, pcie_off + PCI_EXP_LNKCAP, &exp_lnkcap); + if ((exp_lnkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) + return; + + dm_pci_read_config16(dev, pcie_off + PCI_EXP_LNKSTA, &exp_lnksta); + if ((exp_lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) != + PCI_EXP_LNKSTA_LBMS) + return; + + if (dm_pciauto_exp_link_stable(dev, pcie_off)) + return; + + bdf = dm_pci_get_bdf(dev); + printf("PCI Autoconfig: %02x.%02x.%02x: " + "Downstream link non-functional\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + printf("PCI Autoconfig: %02x.%02x.%02x: " + "Retrying with speed restricted to 2.5GT/s...\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + + dm_pci_read_config16(dev, pcie_off + PCI_EXP_LNKCTL, &exp_lnkctl); + dm_pci_read_config16(dev, pcie_off + PCI_EXP_LNKCTL2, &exp_lnkctl2); + + dm_pci_write_config16(dev, pcie_off + PCI_EXP_LNKCTL2, + (exp_lnkctl2 & ~PCI_EXP_LNKCTL2_TLS) | + PCI_EXP_LNKCTL2_TLS_2_5GT); + dm_pci_write_config16(dev, pcie_off + PCI_EXP_LNKCTL, + exp_lnkctl | PCI_EXP_LNKCTL_RL); + + if (dm_pciauto_exp_link_stable(dev, pcie_off)) { + printf("PCI Autoconfig: %02x.%02x.%02x: Succeeded!\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + } else { + printf("PCI Autoconfig: %02x.%02x.%02x: Failed!\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + + dm_pci_write_config16(dev, 
pcie_off + PCI_EXP_LNKCTL2, + exp_lnkctl2); + dm_pci_write_config16(dev, pcie_off + PCI_EXP_LNKCTL, + exp_lnkctl | PCI_EXP_LNKCTL_RL); + } +} + +void dm_pciauto_prescan_setup_bridge(struct udevice *dev, int sub_bus) +{ + struct pci_region *pci_mem; + struct pci_region *pci_prefetch; + struct pci_region *pci_io; + u16 cmdstat, prefechable_64; + u8 io_32; + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *ctlr_hose = dev_get_uclass_priv(ctlr); + int pcie_off; + + pci_mem = ctlr_hose->pci_mem; + pci_prefetch = ctlr_hose->pci_prefetch; + pci_io = ctlr_hose->pci_io; + + dm_pci_read_config16(dev, PCI_COMMAND, &cmdstat); + dm_pci_read_config16(dev, PCI_PREF_MEMORY_BASE, &prefechable_64); + prefechable_64 &= PCI_PREF_RANGE_TYPE_MASK; + dm_pci_read_config8(dev, PCI_IO_BASE, &io_32); + io_32 &= PCI_IO_RANGE_TYPE_MASK; + + /* Configure bus number registers */ + dm_pci_write_config8(dev, PCI_PRIMARY_BUS, + PCI_BUS(dm_pci_get_bdf(dev)) - dev_seq(ctlr)); + dm_pci_write_config8(dev, PCI_SECONDARY_BUS, sub_bus - dev_seq(ctlr)); + dm_pci_write_config8(dev, PCI_SUBORDINATE_BUS, 0xff); + + if (pci_mem) { + /* Round memory allocator to 1MB boundary */ + pciauto_region_align(pci_mem, 0x100000); + + /* + * Set up memory and I/O filter limits, assume 32-bit + * I/O space + */ + dm_pci_write_config16(dev, PCI_MEMORY_BASE, + ((pci_mem->bus_lower & 0xfff00000) >> 16) & + PCI_MEMORY_RANGE_MASK); + + cmdstat |= PCI_COMMAND_MEMORY; + } + + if (pci_prefetch) { + /* Round memory allocator to 1MB boundary */ + pciauto_region_align(pci_prefetch, 0x100000); + + /* + * Set up memory and I/O filter limits, assume 32-bit + * I/O space + */ + dm_pci_write_config16(dev, PCI_PREF_MEMORY_BASE, + (((pci_prefetch->bus_lower & 0xfff00000) >> 16) & + PCI_PREF_RANGE_MASK) | prefechable_64); + if (prefechable_64 == PCI_PREF_RANGE_TYPE_64) +#ifdef CONFIG_SYS_PCI_64BIT + dm_pci_write_config32(dev, PCI_PREF_BASE_UPPER32, + pci_prefetch->bus_lower >> 32); +#else + dm_pci_write_config32(dev, PCI_PREF_BASE_UPPER32, 0x0); +#endif + + cmdstat |= PCI_COMMAND_MEMORY; + } else { + /* We don't support prefetchable memory for now, so disable */ + dm_pci_write_config16(dev, PCI_PREF_MEMORY_BASE, 0xfff0 | + prefechable_64); + dm_pci_write_config16(dev, PCI_PREF_MEMORY_LIMIT, 0x0 | + prefechable_64); + if (prefechable_64 == PCI_PREF_RANGE_TYPE_64) { + dm_pci_write_config16(dev, PCI_PREF_BASE_UPPER32, 0x0); + dm_pci_write_config16(dev, PCI_PREF_LIMIT_UPPER32, 0x0); + } + } + + if (pci_io) { + /* Round I/O allocator to 4KB boundary */ + pciauto_region_align(pci_io, 0x1000); + + dm_pci_write_config8(dev, PCI_IO_BASE, + (((pci_io->bus_lower & 0x0000f000) >> 8) & + PCI_IO_RANGE_MASK) | io_32); + if (io_32 == PCI_IO_RANGE_TYPE_32) + dm_pci_write_config16(dev, PCI_IO_BASE_UPPER16, + (pci_io->bus_lower & 0xffff0000) >> 16); + + cmdstat |= PCI_COMMAND_IO; + } else { + /* Disable I/O if unsupported */ + dm_pci_write_config8(dev, PCI_IO_BASE, 0xf0 | io_32); + dm_pci_write_config8(dev, PCI_IO_LIMIT, 0x0 | io_32); + if (io_32 == PCI_IO_RANGE_TYPE_32) { + dm_pci_write_config16(dev, PCI_IO_BASE_UPPER16, 0x0); + dm_pci_write_config16(dev, PCI_IO_LIMIT_UPPER16, 0x0); + } + } + + /* For PCIe devices see if we need to retrain the link by hand */ + pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP); + if (pcie_off) + dm_pciauto_exp_fixup_link(dev, pcie_off); + + /* Enable memory and I/O accesses, enable bus master */ + dm_pci_write_config16(dev, PCI_COMMAND, cmdstat | PCI_COMMAND_MASTER); +} + +void dm_pciauto_postscan_setup_bridge(struct 
udevice *dev, int sub_bus) +{ + struct pci_region *pci_mem; + struct pci_region *pci_prefetch; + struct pci_region *pci_io; + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *ctlr_hose = dev_get_uclass_priv(ctlr); + + pci_mem = ctlr_hose->pci_mem; + pci_prefetch = ctlr_hose->pci_prefetch; + pci_io = ctlr_hose->pci_io; + + /* Configure bus number registers */ + dm_pci_write_config8(dev, PCI_SUBORDINATE_BUS, sub_bus - dev_seq(ctlr)); + + if (pci_mem) { + /* Round memory allocator to 1MB boundary */ + pciauto_region_align(pci_mem, 0x100000); + + dm_pci_write_config16(dev, PCI_MEMORY_LIMIT, + ((pci_mem->bus_lower - 1) >> 16) & + PCI_MEMORY_RANGE_MASK); + } + + if (pci_prefetch) { + u16 prefechable_64; + + dm_pci_read_config16(dev, PCI_PREF_MEMORY_LIMIT, + &prefechable_64); + prefechable_64 &= PCI_PREF_RANGE_TYPE_MASK; + + /* Round memory allocator to 1MB boundary */ + pciauto_region_align(pci_prefetch, 0x100000); + + dm_pci_write_config16(dev, PCI_PREF_MEMORY_LIMIT, + (((pci_prefetch->bus_lower - 1) >> 16) & + PCI_PREF_RANGE_MASK) | prefechable_64); + if (prefechable_64 == PCI_PREF_RANGE_TYPE_64) +#ifdef CONFIG_SYS_PCI_64BIT + dm_pci_write_config32(dev, PCI_PREF_LIMIT_UPPER32, + (pci_prefetch->bus_lower - 1) >> 32); +#else + dm_pci_write_config32(dev, PCI_PREF_LIMIT_UPPER32, 0x0); +#endif + } + + if (pci_io) { + u8 io_32; + + dm_pci_read_config8(dev, PCI_IO_LIMIT, + &io_32); + io_32 &= PCI_IO_RANGE_TYPE_MASK; + + /* Round I/O allocator to 4KB boundary */ + pciauto_region_align(pci_io, 0x1000); + + dm_pci_write_config8(dev, PCI_IO_LIMIT, + ((((pci_io->bus_lower - 1) & 0x0000f000) >> 8) & + PCI_IO_RANGE_MASK) | io_32); + if (io_32 == PCI_IO_RANGE_TYPE_32) + dm_pci_write_config16(dev, PCI_IO_LIMIT_UPPER16, + ((pci_io->bus_lower - 1) & 0xffff0000) >> 16); + } +} + +/* + * HJF: Changed this to return int. I think this is required + * to get the correct result when scanning bridges + */ +int dm_pciauto_config_device(struct udevice *dev) +{ + struct pci_region *pci_mem; + struct pci_region *pci_prefetch; + struct pci_region *pci_io; + unsigned int sub_bus = PCI_BUS(dm_pci_get_bdf(dev)); + unsigned short class; + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *ctlr_hose = dev_get_uclass_priv(ctlr); + int ret; + + pci_mem = ctlr_hose->pci_mem; + pci_prefetch = ctlr_hose->pci_prefetch; + pci_io = ctlr_hose->pci_io; + + dm_pci_read_config16(dev, PCI_CLASS_DEVICE, &class); + + switch (class) { + case PCI_CLASS_BRIDGE_PCI: + debug("PCI Autoconfig: Found P2P bridge, device %d\n", + PCI_DEV(dm_pci_get_bdf(dev))); + + dm_pciauto_setup_device(dev, pci_mem, pci_prefetch, pci_io); + + ret = dm_pci_hose_probe_bus(dev); + if (ret < 0) + return log_msg_ret("probe", ret); + sub_bus = ret; + break; + + case PCI_CLASS_BRIDGE_CARDBUS: + /* + * just do a minimal setup of the bridge, + * let the OS take care of the rest + */ + dm_pciauto_setup_device(dev, pci_mem, pci_prefetch, pci_io); + + debug("PCI Autoconfig: Found P2CardBus bridge, device %d\n", + PCI_DEV(dm_pci_get_bdf(dev))); + + break; + +#if defined(CONFIG_PCIAUTO_SKIP_HOST_BRIDGE) + case PCI_CLASS_BRIDGE_OTHER: + debug("PCI Autoconfig: Skipping bridge device %d\n", + PCI_DEV(dm_pci_get_bdf(dev))); + break; +#endif +#if defined(CONFIG_ARCH_MPC834X) + case PCI_CLASS_BRIDGE_OTHER: + /* + * The host/PCI bridge 1 seems broken in 8349 - it presents + * itself as 'PCI_CLASS_BRIDGE_OTHER' and appears as an _agent_ + * device claiming resources io/mem/irq.. 
we only allow for + * the PIMMR window to be allocated (BAR0 - 1MB size) + */ + debug("PCI Autoconfig: Broken bridge found, only minimal config\n"); + dm_pciauto_setup_device(dev, 0, hose->pci_mem, + hose->pci_prefetch, hose->pci_io); + break; +#endif + + default: + dm_pciauto_setup_device(dev, pci_mem, pci_prefetch, pci_io); + break; + } + + return sub_bus; +} diff --git a/drivers/pci/pci_auto_common.c b/drivers/pci/pci_auto_common.c new file mode 100644 index 00000000000..cfa818ed821 --- /dev/null +++ b/drivers/pci/pci_auto_common.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCI auto-configuration library + * + * Author: Matt Porter <mporter@mvista.com> + * + * Copyright 2000 MontaVista Software Inc. + * + * Modifications for driver model: + * Copyright 2015 Google, Inc + * Written by Simon Glass <sjg@chromium.org> + */ + +#include <dm.h> +#include <errno.h> +#include <log.h> +#include <pci.h> + +void pciauto_region_init(struct pci_region *res) +{ + /* + * Avoid allocating PCI resources from address 0 -- this is illegal + * according to PCI 2.1 and moreover, this is known to cause Linux IDE + * drivers to fail. Use a reasonable starting value of 0x1000 instead + * if the bus start address is below 0x1000. + */ + res->bus_lower = res->bus_start < 0x1000 ? 0x1000 : res->bus_start; +} + +void pciauto_region_align(struct pci_region *res, pci_size_t size) +{ + res->bus_lower = ((res->bus_lower - 1) | (size - 1)) + 1; +} + +int pciauto_region_allocate(struct pci_region *res, pci_size_t size, + pci_addr_t *bar, bool supports_64bit) +{ + pci_addr_t addr; + + if (!res) { + debug("No resource\n"); + goto error; + } + + addr = ((res->bus_lower - 1) | (size - 1)) + 1; + + if (addr - res->bus_start + size > res->size) { + debug("No room in resource, avail start=%llx / size=%llx, " + "need=%llx\n", (unsigned long long)res->bus_lower, + (unsigned long long)res->size, (unsigned long long)size); + goto error; + } + + if (upper_32_bits(addr) && !supports_64bit) { + debug("Cannot assign 64-bit address to 32-bit-only resource\n"); + goto error; + } + + res->bus_lower = addr + size; + + debug("address=0x%llx bus_lower=0x%llx\n", (unsigned long long)addr, + (unsigned long long)res->bus_lower); + + *bar = addr; + return 0; + + error: + *bar = (pci_addr_t)-1; + return -1; +} + +static void pciauto_show_region(const char *name, struct pci_region *region) +{ + pciauto_region_init(region); + debug("PCI Autoconfig: Bus %s region: [%llx-%llx],\n" + "\t\tPhysical Memory [%llx-%llx]\n", name, + (unsigned long long)region->bus_start, + (unsigned long long)(region->bus_start + region->size - 1), + (unsigned long long)region->phys_start, + (unsigned long long)(region->phys_start + region->size - 1)); +} + +void pciauto_config_init(struct pci_controller *hose) +{ + int i; + + hose->pci_io = NULL; + hose->pci_mem = NULL; + hose->pci_prefetch = NULL; + + for (i = 0; i < hose->region_count; i++) { + switch (hose->regions[i].flags) { + case PCI_REGION_IO: + if (!hose->pci_io || + hose->pci_io->size < hose->regions[i].size) + hose->pci_io = hose->regions + i; + break; + case PCI_REGION_MEM: + if (!hose->pci_mem || + hose->pci_mem->size < hose->regions[i].size) + hose->pci_mem = hose->regions + i; + break; + case (PCI_REGION_MEM | PCI_REGION_PREFETCH): + if (!hose->pci_prefetch || + hose->pci_prefetch->size < hose->regions[i].size) + hose->pci_prefetch = hose->regions + i; + break; + } + } + + + if (hose->pci_mem) + pciauto_show_region("Memory", hose->pci_mem); + if (hose->pci_prefetch) + 
pciauto_show_region("Prefetchable Mem", hose->pci_prefetch); + if (hose->pci_io) + pciauto_show_region("I/O", hose->pci_io); +} diff --git a/drivers/pci/pci_common.c b/drivers/pci/pci_common.c new file mode 100644 index 00000000000..a57cf11cc53 --- /dev/null +++ b/drivers/pci/pci_common.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2014 Google, Inc + * + * (C) Copyright 2001 Sysgo Real-Time Solutions, GmbH <www.elinos.com> + * Andreas Heppel <aheppel@sysgo.de> + * + * (C) Copyright 2002, 2003 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + */ + +#include <dm.h> +#include <env.h> +#include <errno.h> +#include <pci.h> +#include <asm/io.h> + +const char *pci_class_str(u8 class) +{ + switch (class) { + case PCI_CLASS_NOT_DEFINED: + return "Build before PCI Rev2.0"; + break; + case PCI_BASE_CLASS_STORAGE: + return "Mass storage controller"; + break; + case PCI_BASE_CLASS_NETWORK: + return "Network controller"; + break; + case PCI_BASE_CLASS_DISPLAY: + return "Display controller"; + break; + case PCI_BASE_CLASS_MULTIMEDIA: + return "Multimedia device"; + break; + case PCI_BASE_CLASS_MEMORY: + return "Memory controller"; + break; + case PCI_BASE_CLASS_BRIDGE: + return "Bridge device"; + break; + case PCI_BASE_CLASS_COMMUNICATION: + return "Simple comm. controller"; + break; + case PCI_BASE_CLASS_SYSTEM: + return "Base system peripheral"; + break; + case PCI_BASE_CLASS_INPUT: + return "Input device"; + break; + case PCI_BASE_CLASS_DOCKING: + return "Docking station"; + break; + case PCI_BASE_CLASS_PROCESSOR: + return "Processor"; + break; + case PCI_BASE_CLASS_SERIAL: + return "Serial bus controller"; + break; + case PCI_BASE_CLASS_INTELLIGENT: + return "Intelligent controller"; + break; + case PCI_BASE_CLASS_SATELLITE: + return "Satellite controller"; + break; + case PCI_BASE_CLASS_CRYPT: + return "Cryptographic device"; + break; + case PCI_BASE_CLASS_SIGNAL_PROCESSING: + return "DSP"; + break; + case PCI_CLASS_OTHERS: + return "Does not fit any class"; + break; + default: + return "???"; + break; + }; +} + +__weak int pci_skip_dev(struct pci_controller *hose, pci_dev_t dev) +{ + /* + * Check if pci device should be skipped in configuration + */ + if (dev == PCI_BDF(hose->first_busno, 0, 0)) { +#if defined(CONFIG_PCI_CONFIG_HOST_BRIDGE) /* don't skip host bridge */ + /* + * Only skip configuration if "pciconfighost" is not set + */ + if (env_get("pciconfighost") == NULL) + return 1; +#else + return 1; +#endif + } + + return 0; +} + +#if defined(CONFIG_DM_PCI_COMPAT) +/* Get a virtual address associated with a BAR region */ +void *pci_map_bar(pci_dev_t pdev, int bar, int flags) +{ + pci_addr_t pci_bus_addr; + u32 bar_response; + + /* read BAR address */ + pci_read_config_dword(pdev, bar, &bar_response); + pci_bus_addr = (pci_addr_t)(bar_response & ~0xf); + + /* + * Pass "0" as the length argument to pci_bus_to_virt. The arg + * isn't actualy used on any platform because u-boot assumes a static + * linear mapping. In the future, this could read the BAR size + * and pass that as the size if needed. 
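+ * + * A minimal, purely illustrative use from pre-driver-model code could be: + * + *   void *regs = pci_map_bar(bdf, PCI_BASE_ADDRESS_0, PCI_REGION_MEM); + * + * where the bdf and the BAR picked here are just assumptions of the + * example, not anything mandated by this function.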
+ */ + return pci_bus_to_virt(pdev, pci_bus_addr, flags, 0, MAP_NOCACHE); +} + +void pci_write_bar32(struct pci_controller *hose, pci_dev_t dev, int barnum, + u32 addr_and_ctrl) +{ + int bar; + + bar = PCI_BASE_ADDRESS_0 + barnum * 4; + pci_hose_write_config_dword(hose, dev, bar, addr_and_ctrl); +} + +u32 pci_read_bar32(struct pci_controller *hose, pci_dev_t dev, int barnum) +{ + u32 addr; + int bar; + + bar = PCI_BASE_ADDRESS_0 + barnum * 4; + pci_hose_read_config_dword(hose, dev, bar, &addr); + if (addr & PCI_BASE_ADDRESS_SPACE_IO) + return addr & PCI_BASE_ADDRESS_IO_MASK; + else + return addr & PCI_BASE_ADDRESS_MEM_MASK; +} + +int __pci_hose_bus_to_phys(struct pci_controller *hose, + pci_addr_t bus_addr, + unsigned long flags, + unsigned long skip_mask, + phys_addr_t *pa) +{ + struct pci_region *res; + int i; + + for (i = 0; i < hose->region_count; i++) { + res = &hose->regions[i]; + + if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) + continue; + + if (res->flags & skip_mask) + continue; + + if (bus_addr >= res->bus_start && + (bus_addr - res->bus_start) < res->size) { + *pa = (bus_addr - res->bus_start + res->phys_start); + return 0; + } + } + + return 1; +} + +phys_addr_t pci_hose_bus_to_phys(struct pci_controller *hose, + pci_addr_t bus_addr, + unsigned long flags) +{ + phys_addr_t phys_addr = 0; + int ret; + + if (!hose) { + puts("pci_hose_bus_to_phys: invalid hose\n"); + return phys_addr; + } + + /* + * if PCI_REGION_MEM is set we do a two pass search with preference + * on matches that don't have PCI_REGION_SYS_MEMORY set + */ + if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { + ret = __pci_hose_bus_to_phys(hose, bus_addr, + flags, PCI_REGION_SYS_MEMORY, &phys_addr); + if (!ret) + return phys_addr; + } + + ret = __pci_hose_bus_to_phys(hose, bus_addr, flags, 0, &phys_addr); + + if (ret) + puts("pci_hose_bus_to_phys: invalid physical address\n"); + + return phys_addr; +} + +int __pci_hose_phys_to_bus(struct pci_controller *hose, + phys_addr_t phys_addr, + unsigned long flags, + unsigned long skip_mask, + pci_addr_t *ba) +{ + struct pci_region *res; + pci_addr_t bus_addr; + int i; + + for (i = 0; i < hose->region_count; i++) { + res = &hose->regions[i]; + + if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) + continue; + + if (res->flags & skip_mask) + continue; + + bus_addr = phys_addr - res->phys_start + res->bus_start; + + if (bus_addr >= res->bus_start && + (bus_addr - res->bus_start) < res->size) { + *ba = bus_addr; + return 0; + } + } + + return 1; +} + +/* + * pci_hose_phys_to_bus(): Convert physical address to bus address + * @hose: PCI hose of the root PCI controller + * @phys_addr: physical address to convert + * @flags: flags of pci regions + * Return: bus address if OK, 0 on error + */ +pci_addr_t pci_hose_phys_to_bus(struct pci_controller *hose, + phys_addr_t phys_addr, + unsigned long flags) +{ + pci_addr_t bus_addr = 0; + int ret; + + if (!hose) { + puts("pci_hose_phys_to_bus: invalid hose\n"); + return bus_addr; + } + + /* + * if PCI_REGION_MEM is set we do a two pass search with preference + * on matches that don't have PCI_REGION_SYS_MEMORY set + */ + if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { + ret = __pci_hose_phys_to_bus(hose, phys_addr, + flags, PCI_REGION_SYS_MEMORY, &bus_addr); + if (!ret) + return bus_addr; + } + + ret = __pci_hose_phys_to_bus(hose, phys_addr, flags, 0, &bus_addr); + + if (ret) + puts("pci_hose_phys_to_bus: invalid physical address\n"); + + return bus_addr; +} + +pci_dev_t pci_find_device(unsigned int vendor, unsigned int device, 
int index) +{ + struct pci_device_id ids[2] = { {}, {0, 0} }; + + ids[0].vendor = vendor; + ids[0].device = device; + + return pci_find_devices(ids, index); +} + +pci_dev_t pci_hose_find_devices(struct pci_controller *hose, int busnum, + struct pci_device_id *ids, int *indexp) +{ + int found_multi = 0; + u16 vendor, device; + u8 header_type; + pci_dev_t bdf; + int i; + + for (bdf = PCI_BDF(busnum, 0, 0); + bdf < PCI_BDF(busnum + 1, 0, 0); + bdf += PCI_BDF(0, 0, 1)) { + if (pci_skip_dev(hose, bdf)) + continue; + + if (!PCI_FUNC(bdf)) { + pci_read_config_byte(bdf, PCI_HEADER_TYPE, + &header_type); + found_multi = header_type & 0x80; + } else { + if (!found_multi) + continue; + } + + pci_read_config_word(bdf, PCI_VENDOR_ID, &vendor); + pci_read_config_word(bdf, PCI_DEVICE_ID, &device); + + for (i = 0; ids[i].vendor != 0; i++) { + if (vendor == ids[i].vendor && + device == ids[i].device) { + if ((*indexp) <= 0) + return bdf; + + (*indexp)--; + } + } + } + + return -1; +} + +pci_dev_t pci_find_class(uint find_class, int index) +{ + int bus; + int devnum; + pci_dev_t bdf; + uint32_t class; + + for (bus = 0; bus <= pci_last_busno(); bus++) { + for (devnum = 0; devnum < PCI_MAX_PCI_DEVICES - 1; devnum++) { + pci_read_config_dword(PCI_BDF(bus, devnum, 0), + PCI_CLASS_REVISION, &class); + if (class >> 16 == 0xffff) + continue; + + for (bdf = PCI_BDF(bus, devnum, 0); + bdf <= PCI_BDF(bus, devnum, + PCI_MAX_PCI_FUNCTIONS - 1); + bdf += PCI_BDF(0, 0, 1)) { + pci_read_config_dword(bdf, PCI_CLASS_REVISION, + &class); + class >>= 8; + + if (class != find_class) + continue; + /* + * Decrement the index. We want to return the + * correct device, so index is 0 for the first + * matching device, 1 for the second, etc. + */ + if (index) { + index--; + continue; + } + /* Return index'th controller. 
*/ + return bdf; + } + } + } + + return -ENODEV; +} +#endif /* CONFIG_DM_PCI_COMPAT */ diff --git a/drivers/pci/pci_compat.c b/drivers/pci/pci_compat.c new file mode 100644 index 00000000000..8233925e525 --- /dev/null +++ b/drivers/pci/pci_compat.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Compatibility functions for pre-driver-model code + * + * Copyright (C) 2014 Google, Inc + */ +#include <dm.h> +#include <errno.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <dm/device-internal.h> +#include <dm/lists.h> +#include "pci_internal.h" + +#define PCI_HOSE_OP(rw, name, size, type) \ +int pci_hose_##rw##_config_##name(struct pci_controller *hose, \ + pci_dev_t dev, \ + int offset, type value) \ +{ \ + return pci_##rw##_config##size(dev, offset, value); \ +} + +PCI_HOSE_OP(read, byte, 8, u8 *) +PCI_HOSE_OP(read, word, 16, u16 *) +PCI_HOSE_OP(read, dword, 32, u32 *) +PCI_HOSE_OP(write, byte, 8, u8) +PCI_HOSE_OP(write, word, 16, u16) +PCI_HOSE_OP(write, dword, 32, u32) + +pci_dev_t pci_find_devices(struct pci_device_id *ids, int index) +{ + struct udevice *dev; + + if (pci_find_device_id(ids, index, &dev)) + return -1; + return dm_pci_get_bdf(dev); +} + +struct pci_controller *pci_bus_to_hose(int busnum) +{ + struct udevice *bus; + int ret; + + ret = pci_get_bus(busnum, &bus); + if (ret) { + debug("%s: Cannot get bus %d: ret=%d\n", __func__, busnum, ret); + return NULL; + } + + return dev_get_uclass_priv(pci_get_controller(bus)); +} diff --git a/drivers/pci/pci_ftpci100.c b/drivers/pci/pci_ftpci100.c new file mode 100644 index 00000000000..43275b3d6a2 --- /dev/null +++ b/drivers/pci/pci_ftpci100.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <pci.h> +#include <dm.h> +#include <asm/io.h> + +struct ftpci100_data { + void *reg_base; +}; + +/* AHB Control Registers */ +struct ftpci100_ahbc { + u32 iosize; /* 0x00 - I/O Space Size Signal */ + u32 prot; /* 0x04 - AHB Protection */ + u32 rsved[8]; /* 0x08-0x24 - Reserved */ + u32 conf; /* 0x28 - PCI Configuration */ + u32 data; /* 0x2c - PCI Configuration DATA */ +}; + +static int ftpci100_read_config(const struct udevice *dev, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct ftpci100_data *priv = dev_get_priv(dev); + struct ftpci100_ahbc *regs = priv->reg_base; + u32 data; + + out_le32(®s->conf, PCI_CONF1_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset)); + data = in_le32(®s->data); + *valuep = pci_conv_32_to_size(data, offset, size); + + return 0; +} + +static int ftpci100_write_config(struct udevice *dev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct ftpci100_data *priv = dev_get_priv(dev); + struct ftpci100_ahbc *regs = priv->reg_base; + u32 data; + + out_le32(®s->conf, PCI_CONF1_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset)); + + if (size == PCI_SIZE_32) { + data = value; + } else { + u32 old = in_le32(®s->data); + + data = pci_conv_size_to_32(old, value, offset, size); + } + + out_le32(®s->data, data); + + return 0; +} + +static int ftpci100_probe(struct udevice *dev) +{ + struct ftpci100_data *priv = dev_get_priv(dev); + struct pci_region *io, *mem; + int count; + + count = pci_get_regions(dev, &io, &mem, NULL); + if (count != 2) { + printf("%s: wrong count of regions: %d != 2\n", dev->name, count); + return -EINVAL; + } + + priv->reg_base = phys_to_virt(io->phys_start); + if (!priv->reg_base) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops ftpci100_ops = { + 
.read_config = ftpci100_read_config, + .write_config = ftpci100_write_config, +}; + +static const struct udevice_id ftpci100_ids[] = { + { .compatible = "faraday,ftpci100" }, + { } +}; + +U_BOOT_DRIVER(ftpci100_pci) = { + .name = "ftpci100_pci", + .id = UCLASS_PCI, + .of_match = ftpci100_ids, + .ops = &ftpci100_ops, + .probe = ftpci100_probe, + .priv_auto = sizeof(struct ftpci100_data), +}; diff --git a/drivers/pci/pci_gt64120.c b/drivers/pci/pci_gt64120.c new file mode 100644 index 00000000000..2c2a80eeaa0 --- /dev/null +++ b/drivers/pci/pci_gt64120.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2013 Gabor Juhos <juhosg@openwrt.org> + * + * Based on the Linux implementation. + * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc. + * Authors: Carsten Langgaard <carstenl@mips.com> + * Maciej W. Rozycki <macro@mips.com> + */ + +#include <dm.h> +#include <gt64120.h> +#include <init.h> +#include <log.h> +#include <pci.h> +#include <pci_gt64120.h> + +#include <asm/io.h> + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +struct gt64120_regs { + u8 unused_000[0xc18]; + u32 intrcause; + u8 unused_c1c[0x0dc]; + u32 pci0_cfgaddr; + u32 pci0_cfgdata; +}; + +struct gt64120_pci_controller { + struct pci_controller hose; + struct gt64120_regs *regs; +}; + +static inline struct gt64120_pci_controller * +hose_to_gt64120(struct pci_controller *hose) +{ + return container_of(hose, struct gt64120_pci_controller, hose); +} + +#define GT_INTRCAUSE_ABORT_BITS \ + (GT_INTRCAUSE_MASABORT0_BIT | GT_INTRCAUSE_TARABORT0_BIT) + +static int gt_config_access(struct gt64120_pci_controller *gt, + unsigned char access_type, pci_dev_t bdf, + int where, u32 *data) +{ + unsigned int bus = PCI_BUS(bdf); + unsigned int dev = PCI_DEV(bdf); + unsigned int func = PCI_FUNC(bdf); + u32 intr; + u32 addr; + u32 val; + + if (bus == 0 && dev >= 31) { + /* Because of a bug in the galileo (for slot 31). */ + return -1; + } + + if (access_type == PCI_ACCESS_WRITE) + debug("PCI WR %02x:%02x.%x reg:%02d data:%08x\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), where, *data); + + /* Clear cause register bits */ + writel(~GT_INTRCAUSE_ABORT_BITS, >->regs->intrcause); + + addr = PCI_CONF1_ADDRESS(bus, dev, func, where); + + /* Setup address */ + writel(addr, >->regs->pci0_cfgaddr); + + if (access_type == PCI_ACCESS_WRITE) { + if (bus == 0 && dev == 0) { + /* + * The Galileo system controller is acting + * differently than other devices. + */ + val = *data; + } else { + val = cpu_to_le32(*data); + } + + writel(val, >->regs->pci0_cfgdata); + } else { + val = readl(>->regs->pci0_cfgdata); + + if (bus == 0 && dev == 0) { + /* + * The Galileo system controller is acting + * differently than other devices. 
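+ * Presumably its internal registers are already in CPU byte order, so + * the value read here is used as-is rather than converted with + * le32_to_cpu() as for other devices.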
+ */ + *data = val; + } else { + *data = le32_to_cpu(val); + } + } + + /* Check for master or target abort */ + intr = readl(>->regs->intrcause); + if (intr & GT_INTRCAUSE_ABORT_BITS) { + /* Error occurred, clear abort bits */ + writel(~GT_INTRCAUSE_ABORT_BITS, >->regs->intrcause); + return -1; + } + + if (access_type == PCI_ACCESS_READ) + debug("PCI RD %02x:%02x.%x reg:%02d data:%08x\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), where, *data); + + return 0; +} + +static int gt64120_pci_read_config(const struct udevice *dev, pci_dev_t bdf, + uint where, ulong *val, + enum pci_size_t size) +{ + struct gt64120_pci_controller *gt = dev_get_priv(dev); + u32 data = 0; + + if (gt_config_access(gt, PCI_ACCESS_READ, bdf, where, &data)) { + *val = pci_get_ff(size); + return 0; + } + + *val = pci_conv_32_to_size(data, where, size); + + return 0; +} + +static int gt64120_pci_write_config(struct udevice *dev, pci_dev_t bdf, + uint where, ulong val, + enum pci_size_t size) +{ + struct gt64120_pci_controller *gt = dev_get_priv(dev); + u32 data = 0; + + if (size == PCI_SIZE_32) { + data = val; + } else { + u32 old; + + if (gt_config_access(gt, PCI_ACCESS_READ, bdf, where, &old)) + return 0; + + data = pci_conv_size_to_32(old, val, where, size); + } + + gt_config_access(gt, PCI_ACCESS_WRITE, bdf, where, &data); + + return 0; +} + +static int gt64120_pci_probe(struct udevice *dev) +{ + struct gt64120_pci_controller *gt = dev_get_priv(dev); + + gt->regs = dev_remap_addr(dev); + if (!gt->regs) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops gt64120_pci_ops = { + .read_config = gt64120_pci_read_config, + .write_config = gt64120_pci_write_config, +}; + +static const struct udevice_id gt64120_pci_ids[] = { + { .compatible = "marvell,pci-gt64120" }, + { } +}; + +U_BOOT_DRIVER(gt64120_pci) = { + .name = "gt64120_pci", + .id = UCLASS_PCI, + .of_match = gt64120_pci_ids, + .ops = >64120_pci_ops, + .probe = gt64120_pci_probe, + .priv_auto = sizeof(struct gt64120_pci_controller), +}; diff --git a/drivers/pci/pci_internal.h b/drivers/pci/pci_internal.h new file mode 100644 index 00000000000..5a4c90e619d --- /dev/null +++ b/drivers/pci/pci_internal.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Internal PCI functions, not exported outside drivers/pci + * + * Copyright (c) 2015 Google, Inc + * Written by Simon Glass <sjg@chromium.org> + */ + +#ifndef __pci_internal_h +#define __pci_internal_h + +/** + * dm_pciauto_prescan_setup_bridge() - Set up a bridge for scanning + * + * This gets a bridge ready so that its downstream devices can be scanned. + * It sets up the bus number and memory range registers. Once the scan is + * completed, dm_pciauto_postscan_setup_bridge() should be called. + * + * @dev: Bridge device to be scanned + * @sub_bus: Bus number of the 'other side' of the bridge + */ +void dm_pciauto_prescan_setup_bridge(struct udevice *dev, int sub_bus); + +/** + * dm_pciauto_postscan_setup_bridge() - Finish set up of a bridge after scanning + * + * This should be called after a bus scan is complete. It adjusts the memory + * ranges to fit with the devices actually found on the other side (downstream) + * of the bridge. + * + * @dev: Bridge device that was scanned + * @sub_bus: Bus number of the 'other side' of the bridge + */ +void dm_pciauto_postscan_setup_bridge(struct udevice *dev, int sub_bus); + +/** + * dm_pciauto_config_device() - Configure a PCI device ready for use + * + * If the device is a bridge, downstream devices will be probed. 
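+ * The bus behind a bridge is scanned with dm_pci_hose_probe_bus(); see + * dm_pciauto_prescan_setup_bridge() and dm_pciauto_postscan_setup_bridge() + * above for the bridge window set-up performed around such a scan.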
+ * + * @dev: Device to configure + * Return: the maximum PCI bus number found by this device. If there are no + * bridges, this just returns the device's bus number. If the device is a + * bridge then it will return a larger number, depending on the devices on + * that bridge. On error, returns a -ve error number. + */ +int dm_pciauto_config_device(struct udevice *dev); + +/** + * pci_get_bus() - Get a pointer to a bus, given its number + * + * This looks up a PCI bus based on its bus number. The bus is probed if + * necessary. + * + * @busnum: PCI bus number to look up + * @busp: Returns PCI bus on success + * Return: 0 on success, or -ve error + */ +int pci_get_bus(int busnum, struct udevice **busp); + +#endif diff --git a/drivers/pci/pci_mpc85xx.c b/drivers/pci/pci_mpc85xx.c new file mode 100644 index 00000000000..c07feba7976 --- /dev/null +++ b/drivers/pci/pci_mpc85xx.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) Copyright 2019 + * Heiko Schocher, DENX Software Engineering, hs@denx.de. + * + */ +#include <asm/bitops.h> +#include <pci.h> +#include <dm.h> +#include <asm/fsl_law.h> + +struct mpc85xx_pci_priv { + void __iomem *cfg_addr; + void __iomem *cfg_data; +}; + +static int mpc85xx_pci_dm_read_config(const struct udevice *dev, pci_dev_t bdf, + uint offset, ulong *value, + enum pci_size_t size) +{ + struct mpc85xx_pci_priv *priv = dev_get_priv(dev); + u32 addr; + + if (offset > 0xff) { + *value = pci_get_ff(size); + return 0; + } + + /* Skip mpc85xx PCI controller's ATMU inbound registers */ + if (PCI_BUS(bdf) == 0 && PCI_DEV(bdf) == 0 && PCI_FUNC(bdf) == 0 && + (offset & ~3) >= PCI_BASE_ADDRESS_0 && (offset & ~3) <= PCI_BASE_ADDRESS_5) { + *value = 0; + return 0; + } + + addr = PCI_CONF1_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset); + out_be32(priv->cfg_addr, addr); + sync(); + + switch (size) { + case PCI_SIZE_8: + *value = in_8(priv->cfg_data + (offset & 3)); + break; + case PCI_SIZE_16: + *value = in_le16(priv->cfg_data + (offset & 2)); + break; + case PCI_SIZE_32: + *value = in_le32(priv->cfg_data); + break; + } + + return 0; +} + +static int mpc85xx_pci_dm_write_config(struct udevice *dev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct mpc85xx_pci_priv *priv = dev_get_priv(dev); + u32 addr; + + if (offset > 0xff) + return 0; + + /* Skip mpc85xx PCI controller's ATMU inbound registers */ + if (PCI_BUS(bdf) == 0 && PCI_DEV(bdf) == 0 && PCI_FUNC(bdf) == 0 && + (offset & ~3) >= PCI_BASE_ADDRESS_0 && (offset & ~3) <= PCI_BASE_ADDRESS_5) + return 0; + + addr = PCI_CONF1_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset); + out_be32(priv->cfg_addr, addr); + sync(); + + switch (size) { + case PCI_SIZE_8: + out_8(priv->cfg_data + (offset & 3), value); + break; + case PCI_SIZE_16: + out_le16(priv->cfg_data + (offset & 2), value); + break; + case PCI_SIZE_32: + out_le32(priv->cfg_data, value); + break; + } + sync(); + + return 0; +} + +#ifdef CONFIG_FSL_LAW +static int +mpc85xx_pci_dm_setup_laws(struct pci_region *io, struct pci_region *mem, + struct pci_region *pre) +{ + /* + * Unfortunately we have defines for this addresse, + * as we have to setup the TLB, and at this stage + * we have no access to DT ... may we check here + * if the value in the define is the same ? 
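+ * Each set_next_law() call below programs the next free Local Access + * Window (LAW) so that the region's physical range is routed to the + * PCI target interface (LAW_TRGT_IF_PCI).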
+ */ + if (mem) + set_next_law(mem->phys_start, law_size_bits(mem->size), + LAW_TRGT_IF_PCI); + if (io) + set_next_law(io->phys_start, law_size_bits(io->size), + LAW_TRGT_IF_PCI); + if (pre) + set_next_law(pre->phys_start, law_size_bits(pre->size), + LAW_TRGT_IF_PCI); + + return 0; +} +#endif + +static int mpc85xx_pci_dm_probe(struct udevice *dev) +{ + struct mpc85xx_pci_priv *priv = dev_get_priv(dev); + struct pci_region *io; + struct pci_region *mem; + struct pci_region *pre; + int count; + ccsr_pcix_t *pcix; + + count = pci_get_regions(dev, &io, &mem, &pre); + if (count != 2) { + printf("%s: wrong count of regions %d only 2 allowed\n", + __func__, count); + return -EINVAL; + } + +#ifdef CONFIG_FSL_LAW + mpc85xx_pci_dm_setup_laws(io, mem, pre); +#endif + + pcix = priv->cfg_addr; + /* BAR 1: memory */ + out_be32(&pcix->potar1, mem->bus_start >> 12); + out_be32(&pcix->potear1, (u64)mem->bus_start >> 44); + out_be32(&pcix->powbar1, mem->phys_start >> 12); + out_be32(&pcix->powbear1, (u64)mem->phys_start >> 44); + out_be32(&pcix->powar1, (POWAR_EN | POWAR_MEM_READ | + POWAR_MEM_WRITE | (__ilog2(mem->size) - 1))); + + /* BAR 1: IO */ + out_be32(&pcix->potar2, io->bus_start >> 12); + out_be32(&pcix->potear2, (u64)io->bus_start >> 44); + out_be32(&pcix->powbar2, io->phys_start >> 12); + out_be32(&pcix->powbear2, (u64)io->phys_start >> 44); + out_be32(&pcix->powar2, (POWAR_EN | POWAR_IO_READ | + POWAR_IO_WRITE | (__ilog2(io->size) - 1))); + + out_be32(&pcix->pitar1, 0); + out_be32(&pcix->piwbar1, 0); + out_be32(&pcix->piwar1, (PIWAR_EN | PIWAR_PF | PIWAR_LOCAL | + PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP | PIWAR_MEM_2G)); + + out_be32(&pcix->powar3, 0); + out_be32(&pcix->powar4, 0); + out_be32(&pcix->piwar2, 0); + out_be32(&pcix->piwar3, 0); + + return 0; +} + +static int mpc85xx_pci_dm_remove(struct udevice *dev) +{ + return 0; +} + +static int mpc85xx_pci_of_to_plat(struct udevice *dev) +{ + struct mpc85xx_pci_priv *priv = dev_get_priv(dev); + fdt_addr_t addr; + + addr = devfdt_get_addr_index(dev, 0); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + priv->cfg_addr = (void __iomem *)map_physmem(addr, 0, MAP_NOCACHE); + priv->cfg_data = (void __iomem *)((ulong)priv->cfg_addr + 4); + + return 0; +} + +static const struct dm_pci_ops mpc85xx_pci_ops = { + .read_config = mpc85xx_pci_dm_read_config, + .write_config = mpc85xx_pci_dm_write_config, +}; + +static const struct udevice_id mpc85xx_pci_ids[] = { + { .compatible = "fsl,mpc8540-pci" }, + { } +}; + +U_BOOT_DRIVER(mpc85xx_pci) = { + .name = "mpc85xx_pci", + .id = UCLASS_PCI, + .of_match = mpc85xx_pci_ids, + .ops = &mpc85xx_pci_ops, + .probe = mpc85xx_pci_dm_probe, + .remove = mpc85xx_pci_dm_remove, + .of_to_plat = mpc85xx_pci_of_to_plat, + .priv_auto = sizeof(struct mpc85xx_pci_priv), +}; diff --git a/drivers/pci/pci_msc01.c b/drivers/pci/pci_msc01.c new file mode 100644 index 00000000000..8d363d60498 --- /dev/null +++ b/drivers/pci/pci_msc01.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@mips.com> + */ + +#include <dm.h> +#include <init.h> +#include <msc01.h> +#include <pci.h> +#include <pci_msc01.h> +#include <asm/io.h> + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +struct msc01_pci_controller { + struct pci_controller hose; + void *base; +}; + +static inline struct msc01_pci_controller * +hose_to_msc01(struct pci_controller *hose) +{ + return container_of(hose, struct msc01_pci_controller, hose); +} + +static int 
msc01_config_access(struct msc01_pci_controller *msc01, + unsigned char access_type, pci_dev_t bdf, + int where, u32 *data) +{ + const u32 aborts = MSC01_PCI_INTSTAT_MA_MSK | MSC01_PCI_INTSTAT_TA_MSK; + void *intstat = msc01->base + MSC01_PCI_INTSTAT_OFS; + void *cfgdata = msc01->base + MSC01_PCI_CFGDATA_OFS; + unsigned int bus = PCI_BUS(bdf); + unsigned int dev = PCI_DEV(bdf); + unsigned int func = PCI_FUNC(bdf); + + /* clear abort status */ + __raw_writel(aborts, intstat); + + /* setup address */ + __raw_writel((PCI_CONF1_ADDRESS(bus, dev, func, where) & ~PCI_CONF1_ENABLE), + msc01->base + MSC01_PCI_CFGADDR_OFS); + + /* perform access */ + if (access_type == PCI_ACCESS_WRITE) + __raw_writel(*data, cfgdata); + else + *data = __raw_readl(cfgdata); + + /* check for aborts */ + if (__raw_readl(intstat) & aborts) { + /* clear abort status */ + __raw_writel(aborts, intstat); + return -1; + } + + return 0; +} + +static int msc01_pci_read_config(const struct udevice *dev, pci_dev_t bdf, + uint where, ulong *val, enum pci_size_t size) +{ + struct msc01_pci_controller *msc01 = dev_get_priv(dev); + u32 data = 0; + + if (msc01_config_access(msc01, PCI_ACCESS_READ, bdf, where, &data)) { + *val = pci_get_ff(size); + return 0; + } + + *val = pci_conv_32_to_size(data, where, size); + + return 0; +} + +static int msc01_pci_write_config(struct udevice *dev, pci_dev_t bdf, + uint where, ulong val, enum pci_size_t size) +{ + struct msc01_pci_controller *msc01 = dev_get_priv(dev); + u32 data = 0; + + if (size == PCI_SIZE_32) { + data = val; + } else { + u32 old; + + if (msc01_config_access(msc01, PCI_ACCESS_READ, bdf, where, &old)) + return 0; + + data = pci_conv_size_to_32(old, val, where, size); + } + + msc01_config_access(msc01, PCI_ACCESS_WRITE, bdf, where, &data); + + return 0; +} + +static int msc01_pci_probe(struct udevice *dev) +{ + struct msc01_pci_controller *msc01 = dev_get_priv(dev); + + msc01->base = dev_remap_addr(dev); + if (!msc01->base) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops msc01_pci_ops = { + .read_config = msc01_pci_read_config, + .write_config = msc01_pci_write_config, +}; + +static const struct udevice_id msc01_pci_ids[] = { + { .compatible = "mips,pci-msc01" }, + { } +}; + +U_BOOT_DRIVER(msc01_pci) = { + .name = "msc01_pci", + .id = UCLASS_PCI, + .of_match = msc01_pci_ids, + .ops = &msc01_pci_ops, + .probe = msc01_pci_probe, + .priv_auto = sizeof(struct msc01_pci_controller), +}; diff --git a/drivers/pci/pci_mvebu.c b/drivers/pci/pci_mvebu.c new file mode 100644 index 00000000000..77815513b76 --- /dev/null +++ b/drivers/pci/pci_mvebu.c @@ -0,0 +1,830 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe driver for Marvell MVEBU SoCs + * + * Based on Barebox drivers/pci/pci-mvebu.c + * + * Ported to U-Boot by: + * Anton Schubert <anton.schubert@gmx.de> + * Stefan Roese <sr@denx.de> + * Pali Rohár <pali@kernel.org> + */ + +#include <dm.h> +#include <log.h> +#include <malloc.h> +#include <dm/device-internal.h> +#include <dm/lists.h> +#include <dm/of_access.h> +#include <pci.h> +#include <reset.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> +#include <asm/gpio.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/mbus.h> +#include <linux/printk.h> +#include <linux/sizes.h> + +/* PCIe unit register offsets */ +#define MVPCIE_ROOT_PORT_PCI_CFG_OFF 0x0000 +#define MVPCIE_ROOT_PORT_PCI_EXP_OFF 0x0060 +#define MVPCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) +#define 
MVPCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) +#define MVPCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) +#define MVPCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) +#define MVPCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) +#define MVPCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4)) +#define MVPCIE_WIN5_CTRL_OFF 0x1880 +#define MVPCIE_WIN5_BASE_OFF 0x1884 +#define MVPCIE_WIN5_REMAP_OFF 0x188c +#define MVPCIE_CONF_ADDR_OFF 0x18f8 +#define MVPCIE_CONF_DATA_OFF 0x18fc +#define MVPCIE_CTRL_OFF 0x1a00 +#define MVPCIE_CTRL_RC_MODE BIT(1) +#define MVPCIE_STAT_OFF 0x1a04 +#define MVPCIE_STAT_BUS (0xff << 8) +#define MVPCIE_STAT_DEV (0x1f << 16) +#define MVPCIE_STAT_LINK_DOWN BIT(0) + +#define LINK_WAIT_RETRIES 100 +#define LINK_WAIT_TIMEOUT 1000 + +struct mvebu_pcie { + struct pci_controller hose; + void __iomem *base; + void __iomem *membase; + struct resource mem; + void __iomem *iobase; + struct resource io; + struct gpio_desc reset_gpio; + u32 intregs; + u32 port; + u32 lane; + bool is_x4; + int devfn; + int sec_busno; + char name[16]; + unsigned int mem_target; + unsigned int mem_attr; + unsigned int io_target; + unsigned int io_attr; + u32 cfgcache[(0x3c - 0x10) / 4]; +}; + +static inline bool mvebu_pcie_link_up(struct mvebu_pcie *pcie) +{ + u32 val; + val = readl(pcie->base + MVPCIE_STAT_OFF); + return !(val & MVPCIE_STAT_LINK_DOWN); +} + +static void mvebu_pcie_wait_for_link(struct mvebu_pcie *pcie) +{ + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_RETRIES; retries++) { + if (mvebu_pcie_link_up(pcie)) { + printf("%s: Link up\n", pcie->name); + return; + } + + udelay(LINK_WAIT_TIMEOUT); + } + + printf("%s: Link down\n", pcie->name); +} + +static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie *pcie, int busno) +{ + u32 stat; + + stat = readl(pcie->base + MVPCIE_STAT_OFF); + stat &= ~MVPCIE_STAT_BUS; + stat |= busno << 8; + writel(stat, pcie->base + MVPCIE_STAT_OFF); +} + +static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie *pcie, int devno) +{ + u32 stat; + + stat = readl(pcie->base + MVPCIE_STAT_OFF); + stat &= ~MVPCIE_STAT_DEV; + stat |= devno << 16; + writel(stat, pcie->base + MVPCIE_STAT_OFF); +} + +static inline struct mvebu_pcie *hose_to_pcie(struct pci_controller *hose) +{ + return container_of(hose, struct mvebu_pcie, hose); +} + +static bool mvebu_pcie_valid_addr(struct mvebu_pcie *pcie, + int busno, int dev, int func) +{ + /* On the root bus is only one PCI Bridge */ + if (busno == 0 && (dev != 0 || func != 0)) + return false; + + /* Access to other buses is possible when link is up */ + if (busno != 0 && !mvebu_pcie_link_up(pcie)) + return false; + + /* On secondary bus can be only one PCIe device */ + if (busno == pcie->sec_busno && dev != 0) + return false; + + return true; +} + +static int mvebu_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct mvebu_pcie *pcie = dev_get_plat(bus); + int busno = PCI_BUS(bdf) - dev_seq(bus); + u32 addr, data; + + debug("PCIE CFG read: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + + if (!mvebu_pcie_valid_addr(pcie, busno, PCI_DEV(bdf), PCI_FUNC(bdf))) { + debug("- out of range\n"); + *valuep = pci_get_ff(size); + return 0; + } + + /* + * The configuration space of the PCI Bridge on the root bus (zero) is + * of Type 0 but the BAR registers (including ROM BAR) don't have the + * same meaning as in the PCIe specification. 
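+ * (They are aliased to internal controller registers such as + * MVPCIE_BAR_LO_OFF, as described in more detail in mvebu_pcie_probe().)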
Therefore do not access + * BAR registers and non-common registers (those which have different + * meaning for Type 0 and Type 1 config space) of the PCI Bridge and + * instead read their content from driver virtual cfgcache[]. + */ + if (busno == 0 && ((offset >= 0x10 && offset < 0x34) || + (offset >= 0x38 && offset < 0x3c))) { + data = pcie->cfgcache[(offset - 0x10) / 4]; + debug("(addr,size,val)=(0x%04x, %d, 0x%08x) from cfgcache\n", + offset, size, data); + *valuep = pci_conv_32_to_size(data, offset, size); + return 0; + } + + /* + * PCI bridge is device 0 at the root bus (zero) but mvebu has it + * mapped on secondary bus with device number 1. + */ + if (busno == 0) + addr = PCI_CONF1_EXT_ADDRESS(pcie->sec_busno, 1, 0, offset); + else + addr = PCI_CONF1_EXT_ADDRESS(busno, PCI_DEV(bdf), PCI_FUNC(bdf), offset); + + /* write address */ + writel(addr, pcie->base + MVPCIE_CONF_ADDR_OFF); + + /* read data */ + switch (size) { + case PCI_SIZE_8: + data = readb(pcie->base + MVPCIE_CONF_DATA_OFF + (offset & 3)); + break; + case PCI_SIZE_16: + data = readw(pcie->base + MVPCIE_CONF_DATA_OFF + (offset & 2)); + break; + case PCI_SIZE_32: + data = readl(pcie->base + MVPCIE_CONF_DATA_OFF); + break; + default: + return -EINVAL; + } + + if (busno == 0 && (offset & ~3) == (PCI_HEADER_TYPE & ~3)) { + /* + * Change Header Type of PCI Bridge device to Type 1 + * (0x01, used by PCI Bridges) because mvebu reports + * Type 0 (0x00, used by Upstream and Endpoint devices). + */ + data = pci_conv_size_to_32(data, 0, offset, size); + data &= ~0x007f0000; + data |= PCI_HEADER_TYPE_BRIDGE << 16; + data = pci_conv_32_to_size(data, offset, size); + } + + debug("(addr,size,val)=(0x%04x, %d, 0x%08x)\n", offset, size, data); + *valuep = data; + + return 0; +} + +static int mvebu_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct mvebu_pcie *pcie = dev_get_plat(bus); + int busno = PCI_BUS(bdf) - dev_seq(bus); + u32 addr, data; + + debug("PCIE CFG write: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + debug("(addr,size,val)=(0x%04x, %d, 0x%08lx)\n", offset, size, value); + + if (!mvebu_pcie_valid_addr(pcie, busno, PCI_DEV(bdf), PCI_FUNC(bdf))) { + debug("- out of range\n"); + return 0; + } + + /* + * As explained in mvebu_pcie_read_config(), PCI Bridge Type 1 specific + * config registers are not available, so we write their content only + * into driver virtual cfgcache[]. + * And as explained in mvebu_pcie_probe(), mvebu has its own specific + * way for configuring secondary bus number. + */ + if (busno == 0 && ((offset >= 0x10 && offset < 0x34) || + (offset >= 0x38 && offset < 0x3c))) { + debug("Writing to cfgcache only\n"); + data = pcie->cfgcache[(offset - 0x10) / 4]; + data = pci_conv_size_to_32(data, value, offset, size); + /* mvebu PCI bridge does not have configurable bars */ + if ((offset & ~3) == PCI_BASE_ADDRESS_0 || + (offset & ~3) == PCI_BASE_ADDRESS_1 || + (offset & ~3) == PCI_ROM_ADDRESS1) + data = 0x0; + pcie->cfgcache[(offset - 0x10) / 4] = data; + /* mvebu has its own way how to set PCI secondary bus number */ + if (offset == PCI_SECONDARY_BUS || + (offset == PCI_PRIMARY_BUS && size != PCI_SIZE_8)) { + pcie->sec_busno = (data >> 8) & 0xff; + mvebu_pcie_set_local_bus_nr(pcie, pcie->sec_busno); + debug("Secondary bus number was changed to %d\n", + pcie->sec_busno); + } + return 0; + } + + /* + * PCI bridge is device 0 at the root bus (zero) but mvebu has it + * mapped on secondary bus with device number 1. 
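+ * So an access that targets device 0 on the root bus is rewritten below + * to the secondary bus with device number 1, which the controller routes + * to the config space of its local Root Port.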
+ */ + if (busno == 0) + addr = PCI_CONF1_EXT_ADDRESS(pcie->sec_busno, 1, 0, offset); + else + addr = PCI_CONF1_EXT_ADDRESS(busno, PCI_DEV(bdf), PCI_FUNC(bdf), offset); + + /* write address */ + writel(addr, pcie->base + MVPCIE_CONF_ADDR_OFF); + + /* write data */ + switch (size) { + case PCI_SIZE_8: + writeb(value, pcie->base + MVPCIE_CONF_DATA_OFF + (offset & 3)); + break; + case PCI_SIZE_16: + writew(value, pcie->base + MVPCIE_CONF_DATA_OFF + (offset & 2)); + break; + case PCI_SIZE_32: + writel(value, pcie->base + MVPCIE_CONF_DATA_OFF); + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Setup PCIE BARs and Address Decode Wins: + * BAR[0] -> internal registers + * BAR[1] -> covers all DRAM banks + * BAR[2] -> disabled + * WIN[0-3] -> DRAM bank[0-3] + */ +static void mvebu_pcie_setup_wins(struct mvebu_pcie *pcie) +{ + const struct mbus_dram_target_info *dram = mvebu_mbus_dram_info(); + u32 size; + int i; + + /* First, disable and clear BARs and windows. */ + for (i = 1; i < 3; i++) { + writel(0, pcie->base + MVPCIE_BAR_CTRL_OFF(i)); + writel(0, pcie->base + MVPCIE_BAR_LO_OFF(i)); + writel(0, pcie->base + MVPCIE_BAR_HI_OFF(i)); + } + + for (i = 0; i < 5; i++) { + writel(0, pcie->base + MVPCIE_WIN04_CTRL_OFF(i)); + writel(0, pcie->base + MVPCIE_WIN04_BASE_OFF(i)); + writel(0, pcie->base + MVPCIE_WIN04_REMAP_OFF(i)); + } + + writel(0, pcie->base + MVPCIE_WIN5_CTRL_OFF); + writel(0, pcie->base + MVPCIE_WIN5_BASE_OFF); + writel(0, pcie->base + MVPCIE_WIN5_REMAP_OFF); + + /* Setup windows for DDR banks. Count total DDR size on the fly. */ + size = 0; + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel(cs->base & 0xffff0000, + pcie->base + MVPCIE_WIN04_BASE_OFF(i)); + writel(0, pcie->base + MVPCIE_WIN04_REMAP_OFF(i)); + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + pcie->base + MVPCIE_WIN04_CTRL_OFF(i)); + + size += cs->size; + } + + /* Round up 'size' to the nearest power of two. */ + if ((size & (size - 1)) != 0) + size = 1 << fls(size); + + /* Setup BAR[1] to all DRAM banks. */ + writel(dram->cs[0].base | 0xc, pcie->base + MVPCIE_BAR_LO_OFF(1)); + writel(0, pcie->base + MVPCIE_BAR_HI_OFF(1)); + writel(((size - 1) & 0xffff0000) | 0x1, + pcie->base + MVPCIE_BAR_CTRL_OFF(1)); + + /* Setup BAR[0] to internal registers. */ + writel(pcie->intregs, pcie->base + MVPCIE_BAR_LO_OFF(0)); + writel(0, pcie->base + MVPCIE_BAR_HI_OFF(0)); +} + +/* Only enable PCIe link, do not setup it */ +static int mvebu_pcie_enable_link(struct mvebu_pcie *pcie, ofnode node) +{ + struct reset_ctl rst; + int ret; + + ret = reset_get_by_index_nodev(node, 0, &rst); + if (ret == -ENOENT) { + return 0; + } else if (ret < 0) { + printf("%s: cannot get reset controller: %d\n", pcie->name, ret); + return ret; + } + + ret = reset_request(&rst); + if (ret) { + printf("%s: cannot request reset controller: %d\n", pcie->name, ret); + return ret; + } + + ret = reset_deassert(&rst); + reset_free(&rst); + if (ret) { + printf("%s: cannot enable PCIe port: %d\n", pcie->name, ret); + return ret; + } + + return 0; +} + +/* Setup PCIe link but do not enable it */ +static void mvebu_pcie_setup_link(struct mvebu_pcie *pcie) +{ + u32 reg; + + /* Setup PCIe controller to Root Complex mode */ + reg = readl(pcie->base + MVPCIE_CTRL_OFF); + reg |= MVPCIE_CTRL_RC_MODE; + writel(reg, pcie->base + MVPCIE_CTRL_OFF); + + /* + * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link + * Capability register. 
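+ * (That is PCI_EXP_LNKCAP within the Root Port registers at + * MVPCIE_ROOT_PORT_PCI_EXP_OFF, as accessed below.)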
This register is defined by the PCIe specification
+ * as read-only, but this mvebu controller has it as read-write and it must
+ * be set to the number of SerDes PCIe lanes (1 or 4). If this register is
+ * not set correctly then the link with the endpoint card is not established.
+ */
+ reg = readl(pcie->base + MVPCIE_ROOT_PORT_PCI_EXP_OFF + PCI_EXP_LNKCAP);
+ reg &= ~PCI_EXP_LNKCAP_MLW;
+ reg |= (pcie->is_x4 ? 4 : 1) << 4;
+ writel(reg, pcie->base + MVPCIE_ROOT_PORT_PCI_EXP_OFF + PCI_EXP_LNKCAP);
+}
+
+static int mvebu_pcie_probe(struct udevice *dev)
+{
+ struct mvebu_pcie *pcie = dev_get_plat(dev);
+ struct udevice *ctlr = pci_get_controller(dev);
+ struct pci_controller *hose = dev_get_uclass_priv(ctlr);
+ u32 reg;
+ int ret;
+
+ /* Request the optional PERST# GPIO */
+ ret = gpio_request_by_name(dev, "reset-gpios", 0, &pcie->reset_gpio, GPIOD_IS_OUT);
+ if (ret && ret != -ENOENT) {
+ printf("%s: unable to request reset-gpios: %d\n", pcie->name, ret);
+ return ret;
+ }
+
+ /*
+ * Change the Class Code of the PCI Bridge device to PCI Bridge (0x600400)
+ * because the default value is Memory controller (0x508000), which
+ * U-Boot cannot recognize as a P2P Bridge.
+ *
+ * Note that this mvebu PCI Bridge does not have a compliant Type 1
+ * Configuration Space. The Header Type is reported as Type 0 and it
+ * has the format of Type 0 config space.
+ *
+ * Moreover, the Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
+ * have the same format in Marvell's specification as in the PCIe
+ * specification, but their meaning is completely different: they are
+ * aliased into internal mvebu registers (e.g. MVPCIE_BAR_LO_OFF) and
+ * must not be changed or reconfigured by PCI device drivers.
+ *
+ * So this driver converts the Type 0 config space to Type 1 and reports
+ * the Header Type as Type 1. Accesses to the BAR registers and to the
+ * non-existent Type 1 registers are redirected to the virtual cfgcache[]
+ * buffer, which avoids changing unrelated registers.
+ */
+ reg = readl(pcie->base + MVPCIE_ROOT_PORT_PCI_CFG_OFF + PCI_CLASS_REVISION);
+ reg &= ~0xffffff00;
+ reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ writel(reg, pcie->base + MVPCIE_ROOT_PORT_PCI_CFG_OFF + PCI_CLASS_REVISION);
+
+ /*
+ * mvebu uses the local bus number and local device number to determine
+ * the type of a config request. Type 0 is used if the target bus number
+ * equals the local bus number and the target device number differs from
+ * the local device number. Type 1 is used if the target bus number
+ * differs from the local bus number. When the target bus number equals
+ * the local bus number and the target device number equals the local
+ * device number, the request is routed to the PCI Bridge which
+ * represents the local PCIe Root Port.
+ *
+ * This means that the PCI root and secondary buses share one bus number,
+ * which is configured via the local bus number. Whether a config request
+ * goes to the root or the secondary bus is determined by the local
+ * device number.
+ *
+ * PCIe is a point-to-point bus, so the secondary bus always has exactly
+ * one device, with number 0. So set the local device number to 1; it
+ * does not conflict with any device on the secondary bus and ensures
+ * that accessing the secondary bus and all buses behind it works
+ * automatically and correctly. With this choice of local device number,
+ * setting the local bus number effectively configures the secondary bus
+ * number. Set it to 0; the U-Boot CONFIG_PCI_PNP code will
+ * later configure it to the correct value via config write requests.
+ * mvebu_pcie_write_config() catches config write requests which tries + * to change secondary bus number and correctly updates local bus number + * based on new secondary bus number. + * + * With this configuration is PCI Bridge available at secondary bus as + * device number 1. But it must be available at root bus (zero) as device + * number 0. So in mvebu_pcie_read_config() and mvebu_pcie_write_config() + * functions rewrite address to the real one when accessing the root bus. + */ + mvebu_pcie_set_local_bus_nr(pcie, 0); + mvebu_pcie_set_local_dev_nr(pcie, 1); + + /* + * Kirkwood arch code already maps mbus windows for PCIe IO and MEM. + * So skip calling mvebu_mbus_add_window_by_id() function as it would + * fail on error "conflicts with another window" which means conflict + * with existing PCIe window mappings. + */ +#ifndef CONFIG_ARCH_KIRKWOOD + if (resource_size(&pcie->mem) && + mvebu_mbus_add_window_by_id(pcie->mem_target, pcie->mem_attr, + (phys_addr_t)pcie->mem.start, + resource_size(&pcie->mem))) { + printf("%s: unable to add mbus window for mem at %08x+%08x\n", + pcie->name, + (u32)pcie->mem.start, (unsigned)resource_size(&pcie->mem)); + pcie->mem.start = 0; + pcie->mem.end = -1; + } + + if (resource_size(&pcie->io) && + mvebu_mbus_add_window_by_id(pcie->io_target, pcie->io_attr, + (phys_addr_t)pcie->io.start, + resource_size(&pcie->io))) { + printf("%s: unable to add mbus window for IO at %08x+%08x\n", + pcie->name, + (u32)pcie->io.start, (unsigned)resource_size(&pcie->io)); + pcie->io.start = 0; + pcie->io.end = -1; + } +#endif + + /* Setup windows and configure host bridge */ + mvebu_pcie_setup_wins(pcie); + + /* PCI memory space */ + pci_set_region(hose->regions + 0, pcie->mem.start, + pcie->mem.start, resource_size(&pcie->mem), PCI_REGION_MEM); + hose->region_count = 1; + + if (resource_size(&pcie->mem)) { + pci_set_region(hose->regions + hose->region_count, + pcie->mem.start, pcie->mem.start, + resource_size(&pcie->mem), + PCI_REGION_MEM); + hose->region_count++; + } + + if (resource_size(&pcie->io)) { + pci_set_region(hose->regions + hose->region_count, + pcie->io.start, pcie->io.start, + resource_size(&pcie->io), + PCI_REGION_IO); + hose->region_count++; + } + + /* PCI Bridge support 32-bit I/O and 64-bit prefetch mem addressing */ + pcie->cfgcache[(PCI_IO_BASE - 0x10) / 4] = + PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8); + pcie->cfgcache[(PCI_PREF_MEMORY_BASE - 0x10) / 4] = + PCI_PREF_RANGE_TYPE_64 | (PCI_PREF_RANGE_TYPE_64 << 16); + + /* Release PERST# via GPIO when it was defined */ + if (dm_gpio_is_valid(&pcie->reset_gpio)) + dm_gpio_set_value(&pcie->reset_gpio, 0); + + mvebu_pcie_wait_for_link(pcie); + + return 0; +} + +#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) +#define DT_TYPE_IO 0x1 +#define DT_TYPE_MEM32 0x2 +#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF) +#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) + +static int mvebu_get_tgt_attr(ofnode node, int devfn, + unsigned long type, + unsigned int *tgt, + unsigned int *attr) +{ + const int na = 3, ns = 2; + const __be32 *range; + int rlen, nranges, rangesz, pna, i; + + *tgt = -1; + *attr = -1; + + range = ofnode_get_property(node, "ranges", &rlen); + if (!range) + return -EINVAL; + + /* + * Linux uses of_n_addr_cells() to get the number of address cells + * here. Currently this function is only available in U-Boot when + * CONFIG_OF_LIVE is enabled. Until this is enabled for MVEBU in + * general, lets't hardcode the "pna" value in the U-Boot code. 
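Editor's note: the probe comment above describes how the controller routes a config cycle based on the programmed local bus and device numbers. A compact sketch of that decision, with hypothetical names (the real routing is performed by the hardware, not by driver code):

enum cfg_target { CFG_LOCAL_BRIDGE, CFG_TYPE0, CFG_TYPE1 };

/*
 * How a config access to (bus, dev) is routed when the controller is
 * programmed with local_bus and local_dev; a sketch of the behaviour
 * described above.
 */
static enum cfg_target route_cfg(int bus, int dev, int local_bus, int local_dev)
{
	if (bus != local_bus)
		return CFG_TYPE1;		/* forwarded to deeper buses */
	if (dev == local_dev)
		return CFG_LOCAL_BRIDGE;	/* hits the Root Port itself */
	return CFG_TYPE0;			/* device on the secondary bus */
}

With the local device number set to 1, as done in the probe code, the endpoint at device 0 on the secondary bus receives Type 0 cycles, while device number 1 reaches the bridge registers.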
+ */ + pna = 2; /* hardcoded for now because of lack of of_n_addr_cells() */ + rangesz = pna + na + ns; + nranges = rlen / sizeof(__be32) / rangesz; + + for (i = 0; i < nranges; i++, range += rangesz) { + u32 flags = of_read_number(range, 1); + u32 slot = of_read_number(range + 1, 1); + u64 cpuaddr = of_read_number(range + na, pna); + unsigned long rtype; + + if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO) + rtype = IORESOURCE_IO; + else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) + rtype = IORESOURCE_MEM; + else + continue; + + /* + * The Linux code used PCI_SLOT() here, which expects devfn + * in bits 7..0. PCI_DEV() in U-Boot is similar to PCI_SLOT(), + * only expects devfn in 15..8, where its saved in this driver. + */ + if (slot == PCI_DEV(devfn) && type == rtype) { + *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); + *attr = DT_CPUADDR_TO_ATTR(cpuaddr); + return 0; + } + } + + return -ENOENT; +} + +static int mvebu_pcie_port_parse_dt(ofnode node, ofnode parent, struct mvebu_pcie *pcie) +{ + struct fdt_pci_addr pci_addr; + const u32 *addr; + u32 num_lanes; + int ret = 0; + int len; + + /* Get port number, lane number and memory target / attr */ + if (ofnode_read_u32(node, "marvell,pcie-port", + &pcie->port)) { + ret = -ENODEV; + goto err; + } + + if (ofnode_read_u32(node, "marvell,pcie-lane", &pcie->lane)) + pcie->lane = 0; + + sprintf(pcie->name, "pcie%d.%d", pcie->port, pcie->lane); + + if (!ofnode_read_u32(node, "num-lanes", &num_lanes) && num_lanes == 4) + pcie->is_x4 = true; + + /* devfn is in bits [15:8], see PCI_DEV usage */ + ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg", &pci_addr, + NULL); + if (ret < 0) { + printf("%s: property \"reg\" is invalid\n", pcie->name); + goto err; + } + pcie->devfn = pci_addr.phys_hi & 0xff00; + + ret = mvebu_get_tgt_attr(parent, pcie->devfn, + IORESOURCE_MEM, + &pcie->mem_target, &pcie->mem_attr); + if (ret < 0) { + printf("%s: cannot get tgt/attr for mem window\n", pcie->name); + goto err; + } + + ret = mvebu_get_tgt_attr(parent, pcie->devfn, + IORESOURCE_IO, + &pcie->io_target, &pcie->io_attr); + if (ret < 0) { + printf("%s: cannot get tgt/attr for IO window\n", pcie->name); + goto err; + } + + /* Parse PCIe controller register base from DT */ + addr = ofnode_get_property(node, "assigned-addresses", &len); + if (!addr) { + printf("%s: property \"assigned-addresses\" not found\n", pcie->name); + ret = -FDT_ERR_NOTFOUND; + goto err; + } + + pcie->base = (void *)(u32)ofnode_translate_address(node, addr); + pcie->intregs = (u32)pcie->base - fdt32_to_cpu(addr[2]); + + return 0; + +err: + return ret; +} + +static const struct dm_pci_ops mvebu_pcie_ops = { + .read_config = mvebu_pcie_read_config, + .write_config = mvebu_pcie_write_config, +}; + +static struct driver pcie_mvebu_drv = { + .name = "pcie_mvebu", + .id = UCLASS_PCI, + .ops = &mvebu_pcie_ops, + .probe = mvebu_pcie_probe, + .plat_auto = sizeof(struct mvebu_pcie), +}; + +/* + * Use a MISC device to bind the n instances (child nodes) of the + * PCIe base controller in UCLASS_PCI. 
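Editor's note: mvebu_get_tgt_attr() above walks the parent "ranges" property in records of pna + na + ns cells, taking the window type from the flags cell and the mbus target/attribute from the top bytes of the CPU address (the third child address cell is ignored, as in the driver). A standalone sketch of decoding one such record, assuming the same 3 child / 2 parent / 2 size cell layout; the names are illustrative only:

#include <stdint.h>

/* One "ranges" record: 3 child address cells, 2 parent cells, 2 size cells. */
struct pci_range {
	uint32_t flags;		/* child cell 0: space code in bits 25:24 */
	uint32_t slot;		/* child cell 1: port/slot number in this binding */
	uint64_t cpu_addr;	/* parent address: target in 63:56, attr in 55:48 */
	uint64_t size;		/* window size */
};

static void decode_range(const struct pci_range *r, unsigned int *type,
			 unsigned int *target, unsigned int *attr)
{
	*type   = (r->flags >> 24) & 0x03;	/* 1 = I/O, 2 = 32-bit MEM */
	*target = (r->cpu_addr >> 56) & 0xff;	/* mbus target id */
	*attr   = (r->cpu_addr >> 48) & 0xff;	/* mbus attribute */
}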
+ */ +static int mvebu_pcie_bind(struct udevice *parent) +{ + struct mvebu_pcie **ports_pcie; + struct mvebu_pcie *pcie; + struct uclass_driver *drv; + struct udevice *dev; + struct resource mem; + struct resource io; + int ports_count, i; + ofnode *ports_nodes; + ofnode subnode; + + /* Lookup pci driver */ + drv = lists_uclass_lookup(UCLASS_PCI); + if (!drv) { + puts("Cannot find PCI driver\n"); + return -ENOENT; + } + + ports_count = ofnode_get_child_count(dev_ofnode(parent)); + ports_pcie = calloc(ports_count, sizeof(*ports_pcie)); + ports_nodes = calloc(ports_count, sizeof(*ports_nodes)); + if (!ports_pcie || !ports_nodes) { + free(ports_pcie); + free(ports_nodes); + return -ENOMEM; + } + ports_count = 0; + +#ifdef CONFIG_ARCH_KIRKWOOD + mem.start = KW_DEFADR_PCI_MEM; + mem.end = KW_DEFADR_PCI_MEM + KW_DEFADR_PCI_MEM_SIZE - 1; + io.start = KW_DEFADR_PCI_IO; + io.end = KW_DEFADR_PCI_IO + KW_DEFADR_PCI_IO_SIZE - 1; +#else + mem.start = MBUS_PCI_MEM_BASE; + mem.end = MBUS_PCI_MEM_BASE + MBUS_PCI_MEM_SIZE - 1; + io.start = MBUS_PCI_IO_BASE; + io.end = MBUS_PCI_IO_BASE + MBUS_PCI_IO_SIZE - 1; +#endif + + /* First phase: Fill mvebu_pcie struct for each port */ + ofnode_for_each_subnode(subnode, dev_ofnode(parent)) { + if (!ofnode_is_enabled(subnode)) + continue; + + pcie = calloc(1, sizeof(*pcie)); + if (!pcie) + continue; + + if (mvebu_pcie_port_parse_dt(subnode, dev_ofnode(parent), pcie) < 0) { + free(pcie); + continue; + } + + /* + * MVEBU PCIe controller needs MEMORY and I/O BARs to be mapped + * into SoCs address space. Each controller will map 128M of MEM + * and 64K of I/O space when registered. + */ + + if (resource_size(&mem) >= SZ_128M) { + pcie->mem.start = mem.start; + pcie->mem.end = mem.start + SZ_128M - 1; + mem.start += SZ_128M; + } else { + printf("%s: unable to assign mbus window for mem\n", pcie->name); + pcie->mem.start = 0; + pcie->mem.end = -1; + } + + if (resource_size(&io) >= SZ_64K) { + pcie->io.start = io.start; + pcie->io.end = io.start + SZ_64K - 1; + io.start += SZ_64K; + } else { + printf("%s: unable to assign mbus window for io\n", pcie->name); + pcie->io.start = 0; + pcie->io.end = -1; + } + + ports_pcie[ports_count] = pcie; + ports_nodes[ports_count] = subnode; + ports_count++; + } + + /* Second phase: Setup all PCIe links (do not enable them yet) */ + for (i = 0; i < ports_count; i++) + mvebu_pcie_setup_link(ports_pcie[i]); + + /* Third phase: Enable all PCIe links and create for each UCLASS_PCI device */ + for (i = 0; i < ports_count; i++) { + pcie = ports_pcie[i]; + subnode = ports_nodes[i]; + + /* + * PCIe link can be enabled only after all PCIe links were + * properly configured. This is because more PCIe links shares + * one enable bit and some PCIe links cannot be enabled + * individually. 
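Editor's note: mvebu_pcie_bind() above hands each enabled port a fixed 128 MiB slice of the shared MEM aperture and a 64 KiB slice of the I/O aperture, carving them off the front of the pool. A minimal sketch of that carving, with illustrative types rather than the driver's struct resource:

#include <stdint.h>
#include <stdbool.h>

struct window { uint64_t start, end; };	/* 'end' is inclusive */

/* Carve 'len' bytes off the front of 'pool'; false when it no longer fits. */
static bool carve(struct window *pool, uint64_t len, struct window *out)
{
	if (pool->end - pool->start + 1 < len)
		return false;
	out->start = pool->start;
	out->end = pool->start + len - 1;
	pool->start += len;
	return true;
}

Each port takes one SZ_128M slice for MEM and one SZ_64K slice for I/O this way; a port for which the carve fails is given the empty start = 0, end = -1 window, matching the driver's fallback.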
+ */ + if (mvebu_pcie_enable_link(pcie, subnode) < 0) { + free(pcie); + continue; + } + + /* Create child device UCLASS_PCI and bind it */ + device_bind(parent, &pcie_mvebu_drv, pcie->name, pcie, subnode, + &dev); + } + + free(ports_pcie); + free(ports_nodes); + + return 0; +} + +static const struct udevice_id mvebu_pcie_ids[] = { + { .compatible = "marvell,armada-xp-pcie" }, + { .compatible = "marvell,armada-370-pcie" }, + { .compatible = "marvell,kirkwood-pcie" }, + { } +}; + +U_BOOT_DRIVER(pcie_mvebu_base) = { + .name = "pcie_mvebu_base", + .id = UCLASS_MISC, + .of_match = mvebu_pcie_ids, + .bind = mvebu_pcie_bind, +}; diff --git a/drivers/pci/pci_octeontx.c b/drivers/pci/pci_octeontx.c new file mode 100644 index 00000000000..875cf7f7115 --- /dev/null +++ b/drivers/pci/pci_octeontx.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * https://spdx.org/licenses + */ + +#include <dm.h> +#include <errno.h> +#include <fdtdec.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <asm/global_data.h> + +#include <asm/io.h> + +#include <linux/ioport.h> + +DECLARE_GLOBAL_DATA_PTR; + +/* + * This driver supports multiple types of operations / host bridges / busses: + * + * OTX_ECAM: Octeon TX & TX2 ECAM (Enhanced Configuration Access Mechanism) + * Used to access the internal on-chip devices which are connected + * to internal buses + * OTX_PEM: Octeon TX PEM (PCI Express MAC) + * Used to access the external (off-chip) PCI devices + * OTX2_PEM: Octeon TX2 PEM (PCI Express MAC) + * Used to access the external (off-chip) PCI devices + */ +enum { + OTX_ECAM, + OTX_PEM, + OTX2_PEM, +}; + +/** + * struct octeontx_pci - Driver private data + * @type: Device type matched via compatible (e.g. 
OTX_ECAM etc) + * @cfg: Config resource + * @bus: Bus resource + */ +struct octeontx_pci { + unsigned int type; + + struct resource cfg; + struct resource bus; +}; + +static ulong readl_size(uintptr_t addr, enum pci_size_t size) +{ + ulong val; + + switch (size) { + case PCI_SIZE_8: + val = readb(addr); + break; + case PCI_SIZE_16: + val = readw(addr); + break; + case PCI_SIZE_32: + val = readl(addr); + break; + default: + printf("Invalid size\n"); + return -EINVAL; + }; + + return val; +} + +static void writel_size(uintptr_t addr, enum pci_size_t size, ulong valuep) +{ + switch (size) { + case PCI_SIZE_8: + writeb(valuep, addr); + break; + case PCI_SIZE_16: + writew(valuep, addr); + break; + case PCI_SIZE_32: + writel(valuep, addr); + break; + default: + printf("Invalid size\n"); + }; +} + +static bool octeontx_bdf_invalid(pci_dev_t bdf) +{ + if (PCI_BUS(bdf) == 1 && PCI_DEV(bdf) > 0) + return true; + + return false; +} + +static int octeontx_ecam_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + uintptr_t address; + + address = PCIE_ECAM_OFFSET(PCI_BUS(bdf) + pcie->bus.start - hose->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + *valuep = readl_size(pcie->cfg.start + address, size); + + debug("%02x.%02x.%02x: u%d %x -> %lx\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, *valuep); + + return 0; +} + +static int octeontx_ecam_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + uintptr_t address; + + address = PCIE_ECAM_OFFSET(PCI_BUS(bdf) + pcie->bus.start - hose->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + writel_size(pcie->cfg.start + address, size, value); + + debug("%02x.%02x.%02x: u%d %x <- %lx\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, value); + + return 0; +} + +static int octeontx_pem_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + uintptr_t address; + u8 hdrtype; + u8 pri_bus = pcie->bus.start + 1 - hose->first_busno; + u32 bus_offs = (pri_bus << 16) | (pri_bus << 8) | (pri_bus << 0); + + *valuep = pci_conv_32_to_size(~0UL, offset, size); + + if (octeontx_bdf_invalid(bdf)) + return -EPERM; + + address = PCIE_ECAM_OFFSET(PCI_BUS(bdf) + 1 - hose->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), 0) << 4; + *valuep = readl_size(pcie->cfg.start + address + offset, size); + + hdrtype = readb(pcie->cfg.start + address + PCI_HEADER_TYPE); + if (hdrtype == PCI_HEADER_TYPE_BRIDGE && + offset >= PCI_PRIMARY_BUS && + offset <= PCI_SUBORDINATE_BUS && + *valuep != pci_conv_32_to_size(~0UL, offset, size)) + *valuep -= pci_conv_32_to_size(bus_offs, offset, size); + + return 0; +} + +static int octeontx_pem_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + uintptr_t address; + u8 hdrtype; + u8 pri_bus = pcie->bus.start + 1 - hose->first_busno; + u32 bus_offs = (pri_bus << 16) | (pri_bus << 8) | (pri_bus << 
0); + + address = PCIE_ECAM_OFFSET(PCI_BUS(bdf) + 1 - hose->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), 0) << 4; + + hdrtype = readb(pcie->cfg.start + address + PCI_HEADER_TYPE); + if (hdrtype == PCI_HEADER_TYPE_BRIDGE && + offset >= PCI_PRIMARY_BUS && + offset <= PCI_SUBORDINATE_BUS && + value != pci_conv_32_to_size(~0UL, offset, size)) + value += pci_conv_32_to_size(bus_offs, offset, size); + + if (octeontx_bdf_invalid(bdf)) + return -EPERM; + + writel_size(pcie->cfg.start + address + offset, size, value); + + debug("%02x.%02x.%02x: u%d %x (%lx) <- %lx\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, + address, value); + + return 0; +} + +static int octeontx2_pem_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + uintptr_t address; + + *valuep = pci_conv_32_to_size(~0UL, offset, size); + + if (octeontx_bdf_invalid(bdf)) + return -EPERM; + + address = PCIE_ECAM_OFFSET(PCI_BUS(bdf) + 1 - hose->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + *valuep = readl_size(pcie->cfg.start + address, size); + + debug("%02x.%02x.%02x: u%d %x (%lx) -> %lx\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, + address, *valuep); + + return 0; +} + +static int octeontx2_pem_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + uintptr_t address; + + if (octeontx_bdf_invalid(bdf)) + return -EPERM; + + address = PCIE_ECAM_OFFSET(PCI_BUS(bdf) + 1 - hose->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + writel_size(pcie->cfg.start + address, size, value); + + debug("%02x.%02x.%02x: u%d %x (%lx) <- %lx\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, + address, value); + + return 0; +} + +int pci_octeontx_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + int ret = -EIO; + + switch (pcie->type) { + case OTX_ECAM: + ret = octeontx_ecam_read_config(bus, bdf, offset, valuep, + size); + break; + case OTX_PEM: + ret = octeontx_pem_read_config(bus, bdf, offset, valuep, + size); + break; + case OTX2_PEM: + ret = octeontx2_pem_read_config(bus, bdf, offset, valuep, + size); + break; + } + + return ret; +} + +int pci_octeontx_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus); + int ret = -EIO; + + switch (pcie->type) { + case OTX_ECAM: + ret = octeontx_ecam_write_config(bus, bdf, offset, value, + size); + break; + case OTX_PEM: + ret = octeontx_pem_write_config(bus, bdf, offset, value, + size); + break; + case OTX2_PEM: + ret = octeontx2_pem_write_config(bus, bdf, offset, value, + size); + break; + } + + return ret; +} + +static int pci_octeontx_of_to_plat(struct udevice *dev) +{ + return 0; +} + +static int pci_octeontx_probe(struct udevice *dev) +{ + struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(dev); + int err; + + pcie->type = dev_get_driver_data(dev); + + err = dev_read_resource(dev, 0, &pcie->cfg); + if (err) { + debug("Error reading resource: %s\n", fdt_strerror(err)); + return err; + } + + err = dev_read_pci_bus_range(dev, 
&pcie->bus); + if (err) { + debug("Error reading resource: %s\n", fdt_strerror(err)); + return err; + } + + return 0; +} + +static const struct dm_pci_ops pci_octeontx_ops = { + .read_config = pci_octeontx_read_config, + .write_config = pci_octeontx_write_config, +}; + +static const struct udevice_id pci_octeontx_ids[] = { + { .compatible = "cavium,pci-host-thunder-ecam", .data = OTX_ECAM }, + { .compatible = "cavium,pci-host-octeontx-ecam", .data = OTX_ECAM }, + { .compatible = "pci-host-ecam-generic", .data = OTX_ECAM }, + { .compatible = "cavium,pci-host-thunder-pem", .data = OTX_PEM }, + { .compatible = "marvell,pci-host-octeontx2-pem", .data = OTX2_PEM }, + { } +}; + +U_BOOT_DRIVER(pci_octeontx) = { + .name = "pci_octeontx", + .id = UCLASS_PCI, + .of_match = pci_octeontx_ids, + .ops = &pci_octeontx_ops, + .of_to_plat = pci_octeontx_of_to_plat, + .probe = pci_octeontx_probe, + .priv_auto = sizeof(struct octeontx_pci), + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/pci/pci_rom.c b/drivers/pci/pci_rom.c new file mode 100644 index 00000000000..78e5de937cd --- /dev/null +++ b/drivers/pci/pci_rom.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014 Google, Inc + * + * From coreboot, originally based on the Linux kernel (drivers/pci/pci.c). + * + * Modifications are: + * Copyright (C) 2003-2004 Linux Networx + * (Written by Eric Biederman <ebiederman@lnxi.com> for Linux Networx) + * Copyright (C) 2003-2006 Ronald G. Minnich <rminnich@gmail.com> + * Copyright (C) 2004-2005 Li-Ta Lo <ollie@lanl.gov> + * Copyright (C) 2005-2006 Tyan + * (Written by Yinghai Lu <yhlu@tyan.com> for Tyan) + * Copyright (C) 2005-2009 coresystems GmbH + * (Written by Stefan Reinauer <stepan@coresystems.de> for coresystems GmbH) + * + * PCI Bus Services, see include/linux/pci.h for further explanation. 
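Editor's note: the OcteonTX accessors earlier build the config-space address with PCIE_ECAM_OFFSET(). For reference, the standard ECAM layout packs the fields as in the sketch below (written independently of the U-Boot macro); the OTX_PEM paths additionally shift the bus/device/function part left by four bits and add the register offset separately, as seen in the code above.

#include <stdint.h>

/* ECAM: bus in bits 27:20, device in 19:15, function in 14:12, register in 11:0 */
static uint64_t ecam_offset(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t reg)
{
	return ((uint64_t)bus << 20) | ((uint32_t)(dev & 0x1f) << 15) |
	       ((uint32_t)(fn & 0x7) << 12) | (reg & 0xfff);
}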
+ * + * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, + * David Mosberger-Tang + * + * Copyright 1997 -- 1999 Martin Mares <mj@atrey.karlin.mff.cuni.cz> + */ + +#define LOG_CATEGORY UCLASS_PCI + +#include <bios_emul.h> +#include <bloblist.h> +#include <bootstage.h> +#include <dm.h> +#include <errno.h> +#include <init.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <pci_rom.h> +#include <spl.h> +#include <time.h> +#include <vesa.h> +#include <video.h> +#include <acpi/acpi_s3.h> +#include <asm/global_data.h> +#include <linux/screen_info.h> + +DECLARE_GLOBAL_DATA_PTR; + +__weak bool board_should_run_oprom(struct udevice *dev) +{ +#if defined(CONFIG_X86) && defined(CONFIG_HAVE_ACPI_RESUME) + if (gd->arch.prev_sleep_state == ACPI_S3) { + if (IS_ENABLED(CONFIG_S3_VGA_ROM_RUN)) + return true; + else + return false; + } +#endif + + return true; +} + +__weak bool board_should_load_oprom(struct udevice *dev) +{ + return true; +} + +__weak uint32_t board_map_oprom_vendev(uint32_t vendev) +{ + return vendev; +} + +static int pci_rom_probe(struct udevice *dev, struct pci_rom_header **hdrp) +{ + struct pci_child_plat *pplat = dev_get_parent_plat(dev); + struct pci_rom_header *rom_header; + struct pci_rom_data *rom_data; + u16 rom_vendor, rom_device; + u32 rom_class; + u32 vendev; + u32 mapped_vendev; + u32 rom_address; + + vendev = pplat->vendor << 16 | pplat->device; + mapped_vendev = board_map_oprom_vendev(vendev); + if (vendev != mapped_vendev) + debug("Device ID mapped to %#08x\n", mapped_vendev); + +#ifdef CONFIG_VGA_BIOS_ADDR + rom_address = CONFIG_VGA_BIOS_ADDR; +#else + + dm_pci_read_config32(dev, PCI_ROM_ADDRESS, &rom_address); + if (rom_address == 0x00000000 || rom_address == 0xffffffff) { + debug("%s: rom_address=%x\n", __func__, rom_address); + return -ENOENT; + } + rom_address &= PCI_ROM_ADDRESS_MASK; + + /* Enable expansion ROM address decoding. 
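Editor's note: pci_rom_probe() above validates the image through rom_header->signature, ->size and ->data. The classic expansion ROM header those fields come from looks roughly like this; a simplified sketch of the standard layout, not the U-Boot struct definition:

#include <stdint.h>
#include <stdbool.h>

/* Classic PCI expansion ROM header, little-endian fields (simplified). */
struct rom_header_sketch {
	uint16_t signature;	/* 0xaa55, i.e. bytes 0x55 0xaa */
	uint8_t  size;		/* x86 image length in 512-byte units */
	uint8_t  reserved[0x15];
	uint16_t data;		/* offset of the PCIR data structure */
} __attribute__((packed));

static bool rom_header_valid(const struct rom_header_sketch *hdr)
{
	return hdr->signature == 0xaa55;	/* on a little-endian CPU */
}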
*/ + dm_pci_write_config32(dev, PCI_ROM_ADDRESS, + rom_address | PCI_ROM_ADDRESS_ENABLE); +#endif + debug("Option ROM address %x\n", rom_address); + rom_header = (struct pci_rom_header *)(unsigned long)rom_address; + + debug("PCI expansion ROM, signature %#04x, INIT size %#04x, data ptr %#04x\n", + le16_to_cpu(rom_header->signature), + rom_header->size * 512, le16_to_cpu(rom_header->data)); + + if (le16_to_cpu(rom_header->signature) != PCI_ROM_HDR) { + printf("Incorrect expansion ROM header signature %04x\n", + le16_to_cpu(rom_header->signature)); +#ifndef CONFIG_VGA_BIOS_ADDR + /* Disable expansion ROM address decoding */ + dm_pci_write_config32(dev, PCI_ROM_ADDRESS, rom_address); +#endif + return -EINVAL; + } + + rom_data = (((void *)rom_header) + le16_to_cpu(rom_header->data)); + rom_vendor = le16_to_cpu(rom_data->vendor); + rom_device = le16_to_cpu(rom_data->device); + + debug("PCI ROM image, vendor ID %04x, device ID %04x,\n", + rom_vendor, rom_device); + + /* If the device id is mapped, a mismatch is expected */ + if ((pplat->vendor != rom_vendor || pplat->device != rom_device) && + (vendev == mapped_vendev)) { + printf("ID mismatch: vendor ID %04x, device ID %04x\n", + rom_vendor, rom_device); + /* Continue anyway */ + } + + rom_class = (le16_to_cpu(rom_data->class_hi) << 8) | rom_data->class_lo; + debug("PCI ROM image, Class Code %06x, Code Type %02x\n", + rom_class, rom_data->type); + + if (pplat->class != rom_class) { + debug("Class Code mismatch ROM %06x, dev %06x\n", + rom_class, pplat->class); + } + *hdrp = rom_header; + + return 0; +} + +/** + * pci_rom_load() - Load a ROM image and return a pointer to it + * + * @rom_header: Pointer to ROM image + * @ram_headerp: Returns a pointer to the image in RAM + * @allocedp: Returns true if @ram_headerp was allocated and needs + * to be freed + * Return: 0 if OK, -ve on error. Note that @allocedp is set up regardless of + * the error state. Even if this function returns an error, it may have + * allocated memory. 
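Editor's note: pci_rom_load() below steps through the concatenated images in the ROM using the PCIR data structure until it finds an x86 (code type 0) image or hits the last-image indicator. A self-contained sketch of that walk using the field offsets defined by the PCI firmware spec (image length in 512-byte units at 0x10, code type at 0x14, indicator byte at 0x15):

#include <stdint.h>
#include <stddef.h>

/* Return the offset of the first x86 image in a ROM blob, or -1 if none. */
static long find_x86_image(const uint8_t *rom, size_t len)
{
	size_t off = 0;

	while (off + 0x1a <= len) {
		size_t pcir = off + (rom[off + 0x18] | rom[off + 0x19] << 8);
		uint16_t ilen;

		if (pcir + 0x16 > len)
			break;			/* truncated image */
		ilen = rom[pcir + 0x10] | rom[pcir + 0x11] << 8;
		if (rom[pcir + 0x14] == 0)
			return (long)off;	/* x86 image found */
		if ((rom[pcir + 0x15] & 0x80) || !ilen)
			break;			/* last image reached */
		off += (size_t)ilen * 512;
	}

	return -1;
}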
+ */ +static int pci_rom_load(struct pci_rom_header *rom_header, + struct pci_rom_header **ram_headerp, bool *allocedp) +{ + struct pci_rom_data *rom_data; + unsigned int rom_size; + unsigned int image_size = 0; + void *target; + + *allocedp = false; + do { + /* Get next image, until we see an x86 version */ + rom_header = (struct pci_rom_header *)((void *)rom_header + + image_size); + + rom_data = (struct pci_rom_data *)((void *)rom_header + + le16_to_cpu(rom_header->data)); + + image_size = le16_to_cpu(rom_data->ilen) * 512; + } while ((rom_data->type != 0) && (rom_data->indicator == 0)); + + if (rom_data->type != 0) + return -EACCES; + + rom_size = rom_header->size * 512; + +#ifdef PCI_VGA_RAM_IMAGE_START + target = (void *)PCI_VGA_RAM_IMAGE_START; +#else + target = (void *)malloc(rom_size); + if (!target) + return -ENOMEM; + *allocedp = true; +#endif + if (target != rom_header) { + ulong start = get_timer(0); + + debug("Copying VGA ROM Image from %p to %p, 0x%x bytes\n", + rom_header, target, rom_size); + memcpy(target, rom_header, rom_size); + if (memcmp(target, rom_header, rom_size)) { + printf("VGA ROM copy failed\n"); + return -EFAULT; + } + debug("Copy took %lums\n", get_timer(start)); + } + *ram_headerp = target; + + return 0; +} + +struct vesa_state mode_info; + +void setup_video(struct screen_info *screen_info) +{ + struct vesa_mode_info *vesa = &mode_info.vesa; + + /* Sanity test on VESA parameters */ + if (!vesa->x_resolution || !vesa->y_resolution) + return; + + screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB; + + screen_info->lfb_width = vesa->x_resolution; + screen_info->lfb_height = vesa->y_resolution; + screen_info->lfb_depth = vesa->bits_per_pixel; + screen_info->lfb_linelength = vesa->bytes_per_scanline; + screen_info->lfb_base = vesa->phys_base_ptr; + screen_info->lfb_size = + ALIGN(screen_info->lfb_linelength * screen_info->lfb_height, + 65536); + screen_info->lfb_size >>= 16; + screen_info->red_size = vesa->red_mask_size; + screen_info->red_pos = vesa->red_mask_pos; + screen_info->green_size = vesa->green_mask_size; + screen_info->green_pos = vesa->green_mask_pos; + screen_info->blue_size = vesa->blue_mask_size; + screen_info->blue_pos = vesa->blue_mask_pos; + screen_info->rsvd_size = vesa->reserved_mask_size; + screen_info->rsvd_pos = vesa->reserved_mask_pos; +} + +int dm_pci_run_vga_bios(struct udevice *dev, int (*int15_handler)(void), + int exec_method) +{ + struct pci_child_plat *pplat = dev_get_parent_plat(dev); + struct pci_rom_header *rom = NULL, *ram = NULL; + int vesa_mode = -1; + bool emulate, alloced; + int ret; + + /* Only execute VGA ROMs */ + if (((pplat->class >> 8) ^ PCI_CLASS_DISPLAY_VGA) & 0xff00) { + debug("%s: Class %#x, should be %#x\n", __func__, pplat->class, + PCI_CLASS_DISPLAY_VGA); + return -ENODEV; + } + + if (!board_should_load_oprom(dev)) + return log_msg_ret("Should not load OPROM", -ENXIO); + + ret = pci_rom_probe(dev, &rom); + if (ret) + return log_msg_ret("pro", ret); + + ret = pci_rom_load(rom, &ram, &alloced); + if (ret) { + ret = log_msg_ret("ld", ret); + goto err; + } + + if (!board_should_run_oprom(dev)) { + ret = log_msg_ret("run", -ENXIO); + goto err; + } + +#if defined(CONFIG_FRAMEBUFFER_SET_VESA_MODE) && \ + defined(CONFIG_FRAMEBUFFER_VESA_MODE) + vesa_mode = CONFIG_FRAMEBUFFER_VESA_MODE; +#endif + debug("Selected vesa mode 0x%x\n", vesa_mode); + + if (exec_method & PCI_ROM_USE_NATIVE) { +#ifdef CONFIG_X86 + emulate = false; +#else + if (!(exec_method & PCI_ROM_ALLOW_FALLBACK)) { + printf("BIOS native execution is only 
available on x86\n"); + ret = -ENOSYS; + goto err; + } + emulate = true; +#endif + } else { +#ifdef CONFIG_BIOSEMU + emulate = true; +#else + if (!(exec_method & PCI_ROM_ALLOW_FALLBACK)) { + printf("BIOS emulation not available - see CONFIG_BIOSEMU\n"); + ret = -ENOSYS; + goto err; + } + emulate = false; +#endif + } + + if (emulate) { + if (CONFIG_IS_ENABLED(BIOSEMU)) { + BE_VGAInfo *info; + + log_debug("Running video BIOS with emulator..."); + ret = biosemu_setup(dev, &info); + if (ret) + goto err; + biosemu_set_interrupt_handler(0x15, int15_handler); + ret = biosemu_run(dev, (uchar *)ram, 1 << 16, info, + true, vesa_mode, &mode_info); + log_debug("done\n"); + if (ret) + goto err; + } + } else { +#if defined(CONFIG_X86) && (CONFIG_IS_ENABLED(X86_32BIT_INIT) || CONFIG_TPL) + log_debug("Running video BIOS..."); + bios_set_interrupt_handler(0x15, int15_handler); + + bios_run_on_x86(dev, (unsigned long)ram, vesa_mode, + &mode_info); + log_debug("done\n"); +#endif + } + debug("Final vesa mode %x\n", mode_info.video_mode); + ret = 0; + +err: + if (alloced) + free(ram); + return ret; +} + +int vesa_setup_video_priv(struct vesa_mode_info *vesa, u64 fb, + struct video_priv *uc_priv, + struct video_uc_plat *plat) +{ + if (!vesa->x_resolution) + return log_msg_ret("No x resolution", -ENXIO); + uc_priv->xsize = vesa->x_resolution; + uc_priv->ysize = vesa->y_resolution; + uc_priv->line_length = vesa->bytes_per_scanline; + switch (vesa->bits_per_pixel) { + case 32: + case 24: + uc_priv->bpix = VIDEO_BPP32; + break; + case 16: + uc_priv->bpix = VIDEO_BPP16; + break; + default: + return -EPROTONOSUPPORT; + } + + /* Use double buffering if enabled */ + if (IS_ENABLED(CONFIG_VIDEO_COPY) && plat->base) + plat->copy_base = fb; + else + plat->base = fb; + log_debug("base = %lx, copy_base = %lx\n", plat->base, plat->copy_base); + plat->size = vesa->bytes_per_scanline * vesa->y_resolution; + + return 0; +} + +int vesa_setup_video(struct udevice *dev, int (*int15_handler)(void)) +{ + struct video_uc_plat *plat = dev_get_uclass_plat(dev); + struct video_priv *uc_priv = dev_get_uclass_priv(dev); + int ret; + + /* If we are running from EFI or coreboot, this can't work */ + if (!ll_boot_init()) { + printf("Not available (previous bootloader prevents it)\n"); + return -EPERM; + } + + /* In U-Boot proper, collect the information added by SPL (see below) */ + if (IS_ENABLED(CONFIG_SPL_VIDEO) && spl_phase() > PHASE_SPL && + CONFIG_IS_ENABLED(BLOBLIST)) { + struct video_handoff *ho; + + ho = bloblist_find(BLOBLISTT_U_BOOT_VIDEO, sizeof(*ho)); + if (!ho) + return log_msg_ret("blf", -ENOENT); + plat->base = ho->fb; + plat->size = ho->size; + uc_priv->xsize = ho->xsize; + uc_priv->ysize = ho->ysize; + uc_priv->line_length = ho->line_length; + uc_priv->bpix = ho->bpix; + } else { + bootstage_start(BOOTSTAGE_ID_ACCUM_LCD, "vesa display"); + ret = dm_pci_run_vga_bios(dev, int15_handler, + PCI_ROM_USE_NATIVE | + PCI_ROM_ALLOW_FALLBACK); + bootstage_accum(BOOTSTAGE_ID_ACCUM_LCD); + if (ret) { + debug("failed to run video BIOS: %d\n", ret); + return ret; + } + + ret = vesa_setup_video_priv(&mode_info.vesa, + mode_info.vesa.phys_base_ptr, + uc_priv, plat); + if (ret) { + if (ret == -ENFILE) { + /* + * See video-uclass.c for how to set up reserved + * memory in your video driver + */ + log_err("CONFIG_VIDEO_COPY enabled but driver '%s' set up no reserved memory\n", + dev->driver->name); + } + + debug("No video mode configured\n"); + return ret; + } + } + + printf("Video: %dx%dx%d\n", uc_priv->xsize, uc_priv->ysize, + 
mode_info.vesa.bits_per_pixel); + + /* In SPL, store the information for use by U-Boot proper */ + if (spl_phase() == PHASE_SPL && CONFIG_IS_ENABLED(BLOBLIST)) { + struct video_handoff *ho; + + ho = bloblist_add(BLOBLISTT_U_BOOT_VIDEO, sizeof(*ho), 0); + if (!ho) + return log_msg_ret("blc", -ENOMEM); + + ho->fb = plat->base; + ho->size = plat->size; + ho->xsize = uc_priv->xsize; + ho->ysize = uc_priv->ysize; + ho->line_length = uc_priv->line_length; + ho->bpix = uc_priv->bpix; + } + + return 0; +} diff --git a/drivers/pci/pci_sandbox.c b/drivers/pci/pci_sandbox.c new file mode 100644 index 00000000000..fed0850458d --- /dev/null +++ b/drivers/pci/pci_sandbox.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2014 Google, Inc + * Written by Simon Glass <sjg@chromium.org> + */ + +#include <dm.h> +#include <fdtdec.h> +#include <log.h> +#include <pci.h> + +#define FDT_DEV_INFO_CELLS 4 +#define FDT_DEV_INFO_SIZE (FDT_DEV_INFO_CELLS * sizeof(u32)) + +#define SANDBOX_PCI_DEVFN(d, f) ((d << 3) | f) + +struct sandbox_pci_priv { + struct { + u16 vendor; + u16 device; + } vendev[256]; +}; + +static int sandbox_pci_write_config(struct udevice *bus, pci_dev_t devfn, + uint offset, ulong value, + enum pci_size_t size) +{ + struct dm_pci_emul_ops *ops; + struct udevice *container, *emul; + int ret; + + ret = sandbox_pci_get_emul(bus, devfn, &container, &emul); + if (ret) + return ret == -ENODEV ? 0 : ret; + ops = pci_get_emul_ops(emul); + if (!ops || !ops->write_config) + return -ENOSYS; + + return ops->write_config(emul, offset, value, size); +} + +static int sandbox_pci_read_config(const struct udevice *bus, pci_dev_t devfn, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct dm_pci_emul_ops *ops; + struct udevice *container, *emul; + struct sandbox_pci_priv *priv = dev_get_priv(bus); + int ret; + + /* Prepare the default response */ + *valuep = pci_get_ff(size); + ret = sandbox_pci_get_emul(bus, devfn, &container, &emul); + if (ret) { + if (!container) { + u16 vendor, device; + + devfn = SANDBOX_PCI_DEVFN(PCI_DEV(devfn), + PCI_FUNC(devfn)); + vendor = priv->vendev[devfn].vendor; + device = priv->vendev[devfn].device; + if (offset == PCI_VENDOR_ID && vendor) + *valuep = vendor; + else if (offset == PCI_DEVICE_ID && device) + *valuep = device; + + return 0; + } else { + return ret == -ENODEV ? 
0 : ret; + } + } + ops = pci_get_emul_ops(emul); + if (!ops || !ops->read_config) + return -ENOSYS; + + return ops->read_config(emul, offset, valuep, size); +} + +static int sandbox_pci_probe(struct udevice *dev) +{ + struct sandbox_pci_priv *priv = dev_get_priv(dev); + const fdt32_t *cell; + u8 pdev, pfn, devfn; + int len; + + cell = ofnode_get_property(dev_ofnode(dev), "sandbox,dev-info", &len); + if (!cell) + return 0; + + if ((len % FDT_DEV_INFO_SIZE) == 0) { + int num = len / FDT_DEV_INFO_SIZE; + int i; + + for (i = 0; i < num; i++) { + debug("dev info #%d: %02x %02x %04x %04x\n", i, + fdt32_to_cpu(cell[0]), fdt32_to_cpu(cell[1]), + fdt32_to_cpu(cell[2]), fdt32_to_cpu(cell[3])); + + pdev = fdt32_to_cpu(cell[0]); + pfn = fdt32_to_cpu(cell[1]); + if (pdev > 31 || pfn > 7) + continue; + devfn = SANDBOX_PCI_DEVFN(pdev, pfn); + priv->vendev[devfn].vendor = fdt32_to_cpu(cell[2]); + priv->vendev[devfn].device = fdt32_to_cpu(cell[3]); + + cell += FDT_DEV_INFO_CELLS; + } + } + + return 0; +} + +static const struct dm_pci_ops sandbox_pci_ops = { + .read_config = sandbox_pci_read_config, + .write_config = sandbox_pci_write_config, +}; + +static const struct udevice_id sandbox_pci_ids[] = { + { .compatible = "sandbox,pci" }, + { } +}; + +U_BOOT_DRIVER(pci_sandbox) = { + .name = "pci_sandbox", + .id = UCLASS_PCI, + .of_match = sandbox_pci_ids, + .ops = &sandbox_pci_ops, + .probe = sandbox_pci_probe, + .priv_auto = sizeof(struct sandbox_pci_priv), + + /* Attach an emulator if we can */ + .child_post_bind = dm_scan_fdt_dev, + .per_child_plat_auto = sizeof(struct pci_child_plat), +}; diff --git a/drivers/pci/pci_sh7751.c b/drivers/pci/pci_sh7751.c new file mode 100644 index 00000000000..3cd01e9b94a --- /dev/null +++ b/drivers/pci/pci_sh7751.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SH7751 PCI Controller (PCIC) for U-Boot. 
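Editor's note: sandbox_pci_probe() above parses the "sandbox,dev-info" property as an array of four-cell records (device, function, vendor ID, device ID). A generic sketch of iterating such fixed-size big-endian cell records from a raw property blob, with a hypothetical callback interface:

#include <stdint.h>
#include <stddef.h>

#define REC_CELLS	4		/* dev, fn, vendor, device */
#define REC_BYTES	(REC_CELLS * 4)

/* FDT property cells are stored big-endian. */
static uint32_t cell_to_cpu(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* Visit each (dev, fn, vendor, device) record in a raw property blob. */
static void for_each_dev_info(const uint8_t *prop, size_t len,
			      void (*cb)(uint32_t dev, uint32_t fn,
					 uint32_t vendor, uint32_t device))
{
	size_t i;

	if (len % REC_BYTES)
		return;			/* malformed property */

	for (i = 0; i + REC_BYTES <= len; i += REC_BYTES)
		cb(cell_to_cpu(prop + i), cell_to_cpu(prop + i + 4),
		   cell_to_cpu(prop + i + 8), cell_to_cpu(prop + i + 12));
}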
+ * (C) Dustin McIntire (dustin@sensoria.com) + * (C) 2007,2008 Nobuhiro Iwamatsu <iwamatsu@nigauri.org> + */ + +#include <config.h> +#include <dm.h> +#include <pci.h> +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/pci.h> +#include <linux/bitops.h> +#include <linux/delay.h> + +/* Register addresses and such */ +#define SH7751_BCR1 (vu_long *)0xFF800000 +#define SH7751_BCR2 (vu_short *)0xFF800004 +#define SH7751_WCR1 (vu_long *)0xFF800008 +#define SH7751_WCR2 (vu_long *)0xFF80000C +#define SH7751_WCR3 (vu_long *)0xFF800010 +#define SH7751_MCR (vu_long *)0xFF800014 +#define SH7751_BCR3 (vu_short *)0xFF800050 +#define SH7751_PCICONF0 (vu_long *)0xFE200000 +#define SH7751_PCICONF1 (vu_long *)0xFE200004 +#define SH7751_PCICONF2 (vu_long *)0xFE200008 +#define SH7751_PCICONF3 (vu_long *)0xFE20000C +#define SH7751_PCICONF4 (vu_long *)0xFE200010 +#define SH7751_PCICONF5 (vu_long *)0xFE200014 +#define SH7751_PCICONF6 (vu_long *)0xFE200018 +#define SH7751_PCICR (vu_long *)0xFE200100 +#define SH7751_PCILSR0 (vu_long *)0xFE200104 +#define SH7751_PCILSR1 (vu_long *)0xFE200108 +#define SH7751_PCILAR0 (vu_long *)0xFE20010C +#define SH7751_PCILAR1 (vu_long *)0xFE200110 +#define SH7751_PCIMBR (vu_long *)0xFE2001C4 +#define SH7751_PCIIOBR (vu_long *)0xFE2001C8 +#define SH7751_PCIPINT (vu_long *)0xFE2001CC +#define SH7751_PCIPINTM (vu_long *)0xFE2001D0 +#define SH7751_PCICLKR (vu_long *)0xFE2001D4 +#define SH7751_PCIBCR1 (vu_long *)0xFE2001E0 +#define SH7751_PCIBCR2 (vu_long *)0xFE2001E4 +#define SH7751_PCIWCR1 (vu_long *)0xFE2001E8 +#define SH7751_PCIWCR2 (vu_long *)0xFE2001EC +#define SH7751_PCIWCR3 (vu_long *)0xFE2001F0 +#define SH7751_PCIMCR (vu_long *)0xFE2001F4 +#define SH7751_PCIBCR3 (vu_long *)0xFE2001F8 + +#define BCR1_BREQEN 0x00080000 +#define PCI_SH7751_ID 0x35051054 +#define PCI_SH7751R_ID 0x350E1054 +#define SH7751_PCICONF1_WCC 0x00000080 +#define SH7751_PCICONF1_PER 0x00000040 +#define SH7751_PCICONF1_BUM 0x00000004 +#define SH7751_PCICONF1_MES 0x00000002 +#define SH7751_PCICONF1_CMDS 0x000000C6 +#define SH7751_PCI_HOST_BRIDGE 0x6 +#define SH7751_PCICR_PREFIX 0xa5000000 +#define SH7751_PCICR_PRST 0x00000002 +#define SH7751_PCICR_CFIN 0x00000001 +#define SH7751_PCIPINT_D3 0x00000002 +#define SH7751_PCIPINT_D0 0x00000001 +#define SH7751_PCICLKR_PREFIX 0xa5000000 + +#define SH7751_PCI_MEM_BASE 0xFD000000 +#define SH7751_PCI_MEM_SIZE 0x01000000 +#define SH7751_PCI_IO_BASE 0xFE240000 +#define SH7751_PCI_IO_SIZE 0x00040000 + +#define SH7751_PCIPAR (vu_long *)0xFE2001C0 +#define SH7751_PCIPDR (vu_long *)0xFE200220 + +#define p4_in(addr) (*addr) +#define p4_out(data, addr) (*addr) = (data) + +static int sh7751_pci_read_config(const struct udevice *dev, pci_dev_t bdf, + uint offset, ulong *value, + enum pci_size_t size) +{ + u32 addr, reg; + + addr = PCI_CONF1_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset); + p4_out(addr, SH7751_PCIPAR); + reg = p4_in(SH7751_PCIPDR); + *value = pci_conv_32_to_size(reg, offset, size); + + return 0; +} + +static int sh7751_pci_write_config(struct udevice *dev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + u32 addr, reg, old; + + addr = PCI_CONF1_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset); + p4_out(addr, SH7751_PCIPAR); + old = p4_in(SH7751_PCIPDR); + reg = pci_conv_size_to_32(old, value, offset, size); + p4_out(reg, SH7751_PCIPDR); + + return 0; +} + +static int sh7751_pci_probe(struct udevice *dev) +{ + /* Double-check that we're a 7751 or 7751R chip */ + if (p4_in(SH7751_PCICONF0) != PCI_SH7751_ID + 
&& p4_in(SH7751_PCICONF0) != PCI_SH7751R_ID) { + printf("PCI: Unknown PCI host bridge.\n"); + return 1; + } + printf("PCI: SH7751 PCI host bridge found.\n"); + + /* Double-check some BSC config settings */ + /* (Area 3 non-MPX 32-bit, PCI bus pins) */ + if ((p4_in(SH7751_BCR1) & 0x20008) == 0x20000) { + printf("SH7751_BCR1 value is wrong(0x%08X)\n", + (unsigned int)p4_in(SH7751_BCR1)); + return 2; + } + if ((p4_in(SH7751_BCR2) & 0xC0) != 0xC0) { + printf("SH7751_BCR2 value is wrong(0x%08X)\n", + (unsigned int)p4_in(SH7751_BCR2)); + return 3; + } + if (p4_in(SH7751_BCR2) & 0x01) { + printf("SH7751_BCR2 value is wrong(0x%08X)\n", + (unsigned int)p4_in(SH7751_BCR2)); + return 4; + } + + /* Force BREQEN in BCR1 to allow PCIC access */ + p4_out((p4_in(SH7751_BCR1) | BCR1_BREQEN), SH7751_BCR1); + + /* Toggle PCI reset pin */ + p4_out((SH7751_PCICR_PREFIX | SH7751_PCICR_PRST), SH7751_PCICR); + udelay(32); + p4_out(SH7751_PCICR_PREFIX, SH7751_PCICR); + + /* Set cmd bits: WCC, PER, BUM, MES */ + /* (Addr/Data stepping, Parity enabled, Bus Master, Memory enabled) */ + p4_out(0xfb900047, SH7751_PCICONF1); /* K.Kino */ + + /* Define this host as the host bridge */ + p4_out((SH7751_PCI_HOST_BRIDGE << 24), SH7751_PCICONF2); + + /* Force PCI clock(s) on */ + p4_out(0, SH7751_PCICLKR); + p4_out(0x03, SH7751_PCICLKR); + + /* Clear powerdown IRQs, also mask them (unused) */ + p4_out((SH7751_PCIPINT_D0 | SH7751_PCIPINT_D3), SH7751_PCIPINT); + p4_out(0, SH7751_PCIPINTM); + + p4_out(0xab000001, SH7751_PCICONF4); + + /* Set up target memory mappings (for external DMA access) */ + /* Map both P0 and P2 range to Area 3 RAM for ease of use */ + p4_out(CFG_SYS_SDRAM_SIZE - 0x100000, SH7751_PCILSR0); + p4_out(CFG_SYS_SDRAM_BASE & 0x1FF00000, SH7751_PCILAR0); + p4_out(CFG_SYS_SDRAM_BASE & 0xFFF00000, SH7751_PCICONF5); + + p4_out(0, SH7751_PCILSR1); + p4_out(0, SH7751_PCILAR1); + p4_out(0xd0000000, SH7751_PCICONF6); + + /* Map memory window to same address on PCI bus */ + p4_out(SH7751_PCI_MEM_BASE, SH7751_PCIMBR); + + /* Map IO window to same address on PCI bus */ + p4_out(SH7751_PCI_IO_BASE, SH7751_PCIIOBR); + + /* set BREQEN */ + p4_out(inl(SH7751_BCR1) | 0x00080000, SH7751_BCR1); + + /* Copy BSC registers into PCI BSC */ + p4_out(inl(SH7751_BCR1), SH7751_PCIBCR1); + p4_out(inw(SH7751_BCR2), SH7751_PCIBCR2); + p4_out(inw(SH7751_BCR3), SH7751_PCIBCR3); + p4_out(inl(SH7751_WCR1), SH7751_PCIWCR1); + p4_out(inl(SH7751_WCR2), SH7751_PCIWCR2); + p4_out(inl(SH7751_WCR3), SH7751_PCIWCR3); + p4_out(inl(SH7751_MCR), SH7751_PCIMCR); + + /* Finally, set central function init complete */ + p4_out((SH7751_PCICR_PREFIX | SH7751_PCICR_CFIN), SH7751_PCICR); + + return 0; +} + +static const struct dm_pci_ops sh7751_pci_ops = { + .read_config = sh7751_pci_read_config, + .write_config = sh7751_pci_write_config, +}; + +static const struct udevice_id sh7751_pci_ids[] = { + { .compatible = "renesas,pci-sh7751" }, + { } +}; + +U_BOOT_DRIVER(sh7751_pci) = { + .name = "sh7751_pci", + .id = UCLASS_PCI, + .of_match = sh7751_pci_ids, + .ops = &sh7751_pci_ops, + .probe = sh7751_pci_probe, +}; diff --git a/drivers/pci/pci_tegra.c b/drivers/pci/pci_tegra.c new file mode 100644 index 00000000000..bb8832c6ab9 --- /dev/null +++ b/drivers/pci/pci_tegra.c @@ -0,0 +1,1198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2010, CompuLab, Ltd. + * Author: Mike Rapoport <mike@compulab.co.il> + * + * Based on NVIDIA PCIe driver + * Copyright (c) 2008-2009, NVIDIA Corporation. + * + * Copyright (c) 2013-2014, NVIDIA Corporation. 
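Editor's note: the SH7751 accessors earlier use the classic address/data pair: the value built by PCI_CONF1_ADDRESS() is written to PCIPAR and the config dword is then transferred through PCIPDR. That is Configuration Mechanism #1; a sketch of the address encoding it relies on:

#include <stdint.h>

/*
 * Configuration Mechanism #1 address word: bit 31 enable, bits 23:16 bus,
 * 15:11 device, 10:8 function, 7:2 dword-aligned register offset.
 */
static uint32_t conf1_address(uint8_t bus, uint8_t dev, uint8_t fn, uint8_t reg)
{
	return (1u << 31) | ((uint32_t)bus << 16) |
	       ((uint32_t)(dev & 0x1f) << 11) |
	       ((uint32_t)(fn & 0x7) << 8) | (reg & 0xfc);
}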
+ */ + +#define pr_fmt(fmt) "tegra-pcie: " fmt + +#include <clk.h> +#include <dm.h> +#include <errno.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <pci_tegra.h> +#include <power-domain.h> +#include <reset.h> +#include <linux/delay.h> +#include <linux/printk.h> + +#include <asm/io.h> +#include <asm/gpio.h> + +#include <linux/ioport.h> +#include <linux/list.h> + +#ifndef CONFIG_TEGRA186 +#include <asm/arch/clock.h> +#include <asm/arch/powergate.h> +#include <asm/arch-tegra/xusb-padctl.h> +#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h> +#endif + +/* + * FIXME: TODO: This driver contains a number of ifdef CONFIG_TEGRA186 that + * should not be present. These are needed because newer Tegra SoCs support + * only the standard clock/reset APIs, whereas older Tegra SoCs support only + * a custom Tegra-specific API. ASAP the older Tegra SoCs' code should be + * fixed to implement the standard APIs, and all drivers converted to solely + * use the new standard APIs, with no ifdefs. + */ + +#define AFI_AXI_BAR0_SZ 0x00 +#define AFI_AXI_BAR1_SZ 0x04 +#define AFI_AXI_BAR2_SZ 0x08 +#define AFI_AXI_BAR3_SZ 0x0c +#define AFI_AXI_BAR4_SZ 0x10 +#define AFI_AXI_BAR5_SZ 0x14 + +#define AFI_AXI_BAR0_START 0x18 +#define AFI_AXI_BAR1_START 0x1c +#define AFI_AXI_BAR2_START 0x20 +#define AFI_AXI_BAR3_START 0x24 +#define AFI_AXI_BAR4_START 0x28 +#define AFI_AXI_BAR5_START 0x2c + +#define AFI_FPCI_BAR0 0x30 +#define AFI_FPCI_BAR1 0x34 +#define AFI_FPCI_BAR2 0x38 +#define AFI_FPCI_BAR3 0x3c +#define AFI_FPCI_BAR4 0x40 +#define AFI_FPCI_BAR5 0x44 + +#define AFI_CACHE_BAR0_SZ 0x48 +#define AFI_CACHE_BAR0_ST 0x4c +#define AFI_CACHE_BAR1_SZ 0x50 +#define AFI_CACHE_BAR1_ST 0x54 + +#define AFI_MSI_BAR_SZ 0x60 +#define AFI_MSI_FPCI_BAR_ST 0x64 +#define AFI_MSI_AXI_BAR_ST 0x68 + +#define AFI_CONFIGURATION 0xac +#define AFI_CONFIGURATION_EN_FPCI (1 << 0) + +#define AFI_FPCI_ERROR_MASKS 0xb0 + +#define AFI_INTR_MASK 0xb4 +#define AFI_INTR_MASK_INT_MASK (1 << 0) +#define AFI_INTR_MASK_MSI_MASK (1 << 8) + +#define AFI_SM_INTR_ENABLE 0xc4 +#define AFI_SM_INTR_INTA_ASSERT (1 << 0) +#define AFI_SM_INTR_INTB_ASSERT (1 << 1) +#define AFI_SM_INTR_INTC_ASSERT (1 << 2) +#define AFI_SM_INTR_INTD_ASSERT (1 << 3) +#define AFI_SM_INTR_INTA_DEASSERT (1 << 4) +#define AFI_SM_INTR_INTB_DEASSERT (1 << 5) +#define AFI_SM_INTR_INTC_DEASSERT (1 << 6) +#define AFI_SM_INTR_INTD_DEASSERT (1 << 7) + +#define AFI_AFI_INTR_ENABLE 0xc8 +#define AFI_INTR_EN_INI_SLVERR (1 << 0) +#define AFI_INTR_EN_INI_DECERR (1 << 1) +#define AFI_INTR_EN_TGT_SLVERR (1 << 2) +#define AFI_INTR_EN_TGT_DECERR (1 << 3) +#define AFI_INTR_EN_TGT_WRERR (1 << 4) +#define AFI_INTR_EN_DFPCI_DECERR (1 << 5) +#define AFI_INTR_EN_AXI_DECERR (1 << 6) +#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) +#define AFI_INTR_EN_PRSNT_SENSE (1 << 8) + +#define AFI_PCIE_CONFIG 0x0f8 +#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1)) +#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_T186_401 (0x0 << 20) +#define 
AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_T186_211 (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_T186_111 (0x2 << 20) + +#define AFI_FUSE 0x104 +#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) + +#define AFI_PEX0_CTRL 0x110 +#define AFI_PEX1_CTRL 0x118 +#define AFI_PEX2_CTRL 0x128 +#define AFI_PEX2_CTRL_T186 0x19c +#define AFI_PEX_CTRL_RST (1 << 0) +#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) +#define AFI_PEX_CTRL_REFCLK_EN (1 << 3) +#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4) + +#define AFI_PLLE_CONTROL 0x160 +#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9) +#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1) + +#define AFI_PEXBIAS_CTRL_0 0x168 + +#define PADS_CTL_SEL 0x0000009C + +#define PADS_CTL 0x000000A0 +#define PADS_CTL_IDDQ_1L (1 << 0) +#define PADS_CTL_TX_DATA_EN_1L (1 << 6) +#define PADS_CTL_RX_DATA_EN_1L (1 << 10) + +#define PADS_PLL_CTL_TEGRA20 0x000000B8 +#define PADS_PLL_CTL_TEGRA30 0x000000B4 +#define PADS_PLL_CTL_RST_B4SM (0x1 << 1) +#define PADS_PLL_CTL_LOCKDET (0x1 << 8) +#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) +#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0x0 << 16) +#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (0x1 << 16) +#define PADS_PLL_CTL_REFCLK_EXTERNAL (0x2 << 16) +#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) +#define PADS_PLL_CTL_TXCLKREF_DIV10 (0x0 << 20) +#define PADS_PLL_CTL_TXCLKREF_DIV5 (0x1 << 20) +#define PADS_PLL_CTL_TXCLKREF_BUF_EN (0x1 << 22) + +#define PADS_REFCLK_CFG0 0x000000C8 +#define PADS_REFCLK_CFG1 0x000000CC + +/* + * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit + * entries, one entry per PCIe port. These field definitions and desired + * values aren't in the TRM, but do come from NVIDIA. + */ +#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */ +#define PADS_REFCLK_CFG_E_TERM_SHIFT 7 +#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */ +#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ + +#define RP_VEND_XP 0x00000F00 +#define RP_VEND_XP_DL_UP (1 << 30) + +#define RP_VEND_CTL2 0x00000FA8 +#define RP_VEND_CTL2_PCA_ENABLE (1 << 7) + +#define RP_PRIV_MISC 0x00000FE0 +#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0) +#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0) + +#define RP_LINK_CONTROL_STATUS 0x00000090 +#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 +#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 + +enum tegra_pci_id { + TEGRA20_PCIE, + TEGRA30_PCIE, + TEGRA124_PCIE, + TEGRA210_PCIE, + TEGRA186_PCIE, +}; + +struct tegra_pcie_port { + struct tegra_pcie *pcie; + + struct fdt_resource regs; + unsigned int num_lanes; + unsigned int index; + + struct list_head list; +}; + +struct tegra_pcie_soc { + unsigned int num_ports; + unsigned long pads_pll_ctl; + unsigned long tx_ref_sel; + unsigned long afi_pex2_ctrl; + u32 pads_refclk_cfg0; + u32 pads_refclk_cfg1; + bool has_pex_clkreq_en; + bool has_pex_bias_ctrl; + bool has_cml_clk; + bool has_gen2; + bool force_pca_enable; +}; + +struct tegra_pcie { + struct resource pads; + struct resource afi; + struct resource cs; + + struct list_head ports; + unsigned long xbar; + + const struct tegra_pcie_soc *soc; + +#ifdef CONFIG_TEGRA186 + struct clk clk_afi; + struct clk clk_pex; + struct reset_ctl reset_afi; + struct reset_ctl reset_pex; + struct reset_ctl reset_pcie_x; + struct power_domain pwrdom; +#else + struct tegra_xusb_phy *phy; +#endif +}; + +static void afi_writel(struct tegra_pcie *pcie, unsigned long value, + unsigned long offset) +{ + writel(value, pcie->afi.start + offset); +} + +static unsigned long afi_readl(struct tegra_pcie *pcie, unsigned 
long offset) +{ + return readl(pcie->afi.start + offset); +} + +static void pads_writel(struct tegra_pcie *pcie, unsigned long value, + unsigned long offset) +{ + writel(value, pcie->pads.start + offset); +} + +#ifndef CONFIG_TEGRA186 +static unsigned long pads_readl(struct tegra_pcie *pcie, unsigned long offset) +{ + return readl(pcie->pads.start + offset); +} +#endif + +static unsigned long rp_readl(struct tegra_pcie_port *port, + unsigned long offset) +{ + return readl(port->regs.start + offset); +} + +static void rp_writel(struct tegra_pcie_port *port, unsigned long value, + unsigned long offset) +{ + writel(value, port->regs.start + offset); +} + +static int tegra_pcie_conf_address(struct tegra_pcie *pcie, pci_dev_t bdf, + int where, unsigned long *address) +{ + unsigned int bus = PCI_BUS(bdf); + + if (bus == 0) { + unsigned int dev = PCI_DEV(bdf); + struct tegra_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) { + if (port->index + 1 == dev) { + *address = port->regs.start + (where & ~3); + return 0; + } + } + return -EFAULT; + } else { +#ifdef CONFIG_TEGRA20 + unsigned int dev = PCI_DEV(bdf); + if (dev != 0) + return -EFAULT; +#endif + + *address = pcie->cs.start + + (PCI_CONF1_EXT_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), + PCI_FUNC(bdf), where) & ~PCI_CONF1_ENABLE); + return 0; + } +} + +static int pci_tegra_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct tegra_pcie *pcie = dev_get_priv(bus); + unsigned long address, value; + int err; + + err = tegra_pcie_conf_address(pcie, bdf, offset, &address); + if (err < 0) { + value = 0xffffffff; + goto done; + } + + value = readl(address); + +#ifdef CONFIG_TEGRA20 + /* fixup root port class */ + if (PCI_BUS(bdf) == 0) { + if ((offset & ~3) == PCI_CLASS_REVISION) { + value &= ~0x00ffff00; + value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; + } + } +#endif + +done: + *valuep = pci_conv_32_to_size(value, offset, size); + + return 0; +} + +static int pci_tegra_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct tegra_pcie *pcie = dev_get_priv(bus); + unsigned long address; + ulong old; + int err; + + err = tegra_pcie_conf_address(pcie, bdf, offset, &address); + if (err < 0) + return 0; + + old = readl(address); + value = pci_conv_size_to_32(old, value, offset, size); + writel(value, address); + + return 0; +} + +static int tegra_pcie_port_parse_dt(ofnode node, struct tegra_pcie_port *port) +{ + const u32 *addr; + int len; + + addr = ofnode_get_property(node, "assigned-addresses", &len); + if (!addr) { + pr_err("property \"assigned-addresses\" not found"); + return -FDT_ERR_NOTFOUND; + } + + port->regs.start = fdt32_to_cpu(addr[2]); + port->regs.end = port->regs.start + fdt32_to_cpu(addr[4]); + + return 0; +} + +static int tegra_pcie_get_xbar_config(ofnode node, u32 lanes, + enum tegra_pci_id id, unsigned long *xbar) +{ + switch (id) { + case TEGRA20_PCIE: + switch (lanes) { + case 0x00000004: + debug("single-mode configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE; + return 0; + + case 0x00000202: + debug("dual-mode configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; + return 0; + } + break; + case TEGRA30_PCIE: + switch (lanes) { + case 0x00000204: + debug("4x1, 2x1 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420; + return 0; + + case 0x00020202: + debug("2x3 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222; + return 0; + + case 
0x00010104: + debug("4x1, 1x2 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411; + return 0; + } + break; + case TEGRA124_PCIE: + case TEGRA210_PCIE: + switch (lanes) { + case 0x0000104: + debug("4x1, 1x1 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1; + return 0; + + case 0x0000102: + debug("2x1, 1x1 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1; + return 0; + } + break; + case TEGRA186_PCIE: + switch (lanes) { + case 0x0010004: + debug("x4 x1 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_T186_401; + return 0; + + case 0x0010102: + debug("x2 x1 x1 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_T186_211; + return 0; + + case 0x0010101: + debug("x1 x1 x1 configuration\n"); + *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_T186_111; + return 0; + } + break; + default: + break; + } + + return -FDT_ERR_NOTFOUND; +} + +static int tegra_pcie_parse_port_info(ofnode node, uint *index, uint *lanes) +{ + struct fdt_pci_addr addr; + int err; + + err = ofnode_read_u32_default(node, "nvidia,num-lanes", -1); + if (err < 0) { + pr_err("failed to parse \"nvidia,num-lanes\" property\n"); + return err; + } + + *lanes = err; + + err = ofnode_read_pci_addr(node, 0, "reg", &addr, NULL); + if (err < 0) { + pr_err("failed to parse \"reg\" property\n"); + return err; + } + + *index = PCI_DEV(addr.phys_hi) - 1; + + return 0; +} + +int __weak tegra_pcie_board_init(void) +{ + return 0; +} + +static int tegra_pcie_parse_dt(struct udevice *dev, enum tegra_pci_id id, + struct tegra_pcie *pcie) +{ + ofnode subnode; + u32 lanes = 0; + int err; + + err = dev_read_resource(dev, 0, &pcie->pads); + if (err < 0) { + pr_err("resource \"pads\" not found"); + return err; + } + + err = dev_read_resource(dev, 1, &pcie->afi); + if (err < 0) { + pr_err("resource \"afi\" not found"); + return err; + } + + err = dev_read_resource(dev, 2, &pcie->cs); + if (err < 0) { + pr_err("resource \"cs\" not found"); + return err; + } + + err = tegra_pcie_board_init(); + if (err < 0) { + pr_err("tegra_pcie_board_init() failed: err=%d", err); + return err; + } + +#ifndef CONFIG_TEGRA186 + pcie->phy = tegra_xusb_phy_get(TEGRA_XUSB_PADCTL_PCIE); + if (pcie->phy) { + err = tegra_xusb_phy_prepare(pcie->phy); + if (err < 0) { + pr_err("failed to prepare PHY: %d", err); + return err; + } + } +#endif + + dev_for_each_subnode(subnode, dev) { + unsigned int index = 0, num_lanes = 0; + struct tegra_pcie_port *port; + + err = tegra_pcie_parse_port_info(subnode, &index, &num_lanes); + if (err < 0) { + pr_err("failed to obtain root port info"); + continue; + } + + lanes |= num_lanes << (index << 3); + + if (!ofnode_is_enabled(subnode)) + continue; + + port = malloc(sizeof(*port)); + if (!port) + continue; + + memset(port, 0, sizeof(*port)); + port->num_lanes = num_lanes; + port->index = index; + + err = tegra_pcie_port_parse_dt(subnode, port); + if (err < 0) { + free(port); + continue; + } + + list_add_tail(&port->list, &pcie->ports); + port->pcie = pcie; + } + + err = tegra_pcie_get_xbar_config(dev_ofnode(dev), lanes, id, + &pcie->xbar); + if (err < 0) { + pr_err("invalid lane configuration"); + return err; + } + + return 0; +} + +#ifdef CONFIG_TEGRA186 +static int tegra_pcie_power_on(struct tegra_pcie *pcie) +{ + int ret; + + ret = power_domain_on(&pcie->pwrdom); + if (ret) { + pr_err("power_domain_on() failed: %d\n", ret); + return ret; + } + + ret = clk_enable(&pcie->clk_afi); + if (ret) { + pr_err("clk_enable(afi) failed: %d\n", ret); + return ret; + } + + 
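Editor's note: tegra_pcie_parse_dt() above packs each root port's lane count into one byte of a 32-bit word (lanes |= num_lanes << (index << 3)), which tegra_pcie_get_xbar_config() then matches against constants such as 0x00010104. A tiny program showing why that constant corresponds to lane counts 4, 1 and 1 for ports 0, 1 and 2:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int num_lanes[] = { 4, 1, 1 };	/* ports 0, 1, 2 */
	uint32_t lanes = 0;
	unsigned int i;

	for (i = 0; i < 3; i++)
		lanes |= num_lanes[i] << (i << 3);	/* one byte per port */

	printf("0x%08x\n", lanes);	/* prints 0x00010104 */
	return 0;
}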
ret = clk_enable(&pcie->clk_pex); + if (ret) { + pr_err("clk_enable(pex) failed: %d\n", ret); + return ret; + } + + ret = reset_deassert(&pcie->reset_afi); + if (ret) { + pr_err("reset_deassert(afi) failed: %d\n", ret); + return ret; + } + + ret = reset_deassert(&pcie->reset_pex); + if (ret) { + pr_err("reset_deassert(pex) failed: %d\n", ret); + return ret; + } + + return 0; +} +#else +static int tegra_pcie_power_on(struct tegra_pcie *pcie) +{ + const struct tegra_pcie_soc *soc = pcie->soc; + unsigned long value; + int err; + + /* reset PCIEXCLK logic, AFI controller and PCIe controller */ + reset_set_enable(PERIPH_ID_PCIEXCLK, 1); + reset_set_enable(PERIPH_ID_AFI, 1); + reset_set_enable(PERIPH_ID_PCIE, 1); + + err = tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); + if (err < 0) { + pr_err("failed to power off PCIe partition: %d", err); + return err; + } + + err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, + PERIPH_ID_PCIE); + if (err < 0) { + pr_err("failed to power up PCIe partition: %d", err); + return err; + } + + /* take AFI controller out of reset */ + reset_set_enable(PERIPH_ID_AFI, 0); + + /* enable AFI clock */ + clock_enable(PERIPH_ID_AFI); + + if (soc->has_cml_clk) { + /* enable CML clock */ + value = readl(NV_PA_CLK_RST_BASE + 0x48c); + value |= (1 << 0); + value &= ~(1 << 1); + writel(value, NV_PA_CLK_RST_BASE + 0x48c); + } + + err = tegra_plle_enable(); + if (err < 0) { + pr_err("failed to enable PLLE: %d\n", err); + return err; + } + + return 0; +} + +static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) +{ + const struct tegra_pcie_soc *soc = pcie->soc; + unsigned long start = get_timer(0); + u32 value; + + while (get_timer(start) < timeout) { + value = pads_readl(pcie, soc->pads_pll_ctl); + if (value & PADS_PLL_CTL_LOCKDET) + return 0; + } + + return -ETIMEDOUT; +} + +static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) +{ + const struct tegra_pcie_soc *soc = pcie->soc; + u32 value; + int err; + + /* initialize internal PHY, enable up to 16 PCIe lanes */ + pads_writel(pcie, 0, PADS_CTL_SEL); + + /* override IDDQ to 1 on all 4 lanes */ + value = pads_readl(pcie, PADS_CTL); + value |= PADS_CTL_IDDQ_1L; + pads_writel(pcie, value, PADS_CTL); + + /* + * Set up PHY PLL inputs select PLLE output as refclock, set TX + * ref sel to div10 (not div5). 
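+	 * The PLL is then reset, released after a 20 us delay and polled for
+	 * lock detect (500 ms timeout) before the IDDQ override is dropped and
+	 * the TX/RX data paths are enabled.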
+ */ + value = pads_readl(pcie, soc->pads_pll_ctl); + value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK); + value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel; + pads_writel(pcie, value, soc->pads_pll_ctl); + + /* reset PLL */ + value = pads_readl(pcie, soc->pads_pll_ctl); + value &= ~PADS_PLL_CTL_RST_B4SM; + pads_writel(pcie, value, soc->pads_pll_ctl); + + udelay(20); + + /* take PLL out of reset */ + value = pads_readl(pcie, soc->pads_pll_ctl); + value |= PADS_PLL_CTL_RST_B4SM; + pads_writel(pcie, value, soc->pads_pll_ctl); + + /* wait for the PLL to lock */ + err = tegra_pcie_pll_wait(pcie, 500); + if (err < 0) { + pr_err("PLL failed to lock: %d", err); + return err; + } + + /* turn off IDDQ override */ + value = pads_readl(pcie, PADS_CTL); + value &= ~PADS_CTL_IDDQ_1L; + pads_writel(pcie, value, PADS_CTL); + + /* enable TX/RX data */ + value = pads_readl(pcie, PADS_CTL); + value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L; + pads_writel(pcie, value, PADS_CTL); + + return 0; +} +#endif + +static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) +{ + const struct tegra_pcie_soc *soc = pcie->soc; + struct tegra_pcie_port *port; + u32 value; + int err; + +#ifdef CONFIG_TEGRA186 + { +#else + if (pcie->phy) { +#endif + value = afi_readl(pcie, AFI_PLLE_CONTROL); + value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL; + value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN; + afi_writel(pcie, value, AFI_PLLE_CONTROL); + } + + if (soc->has_pex_bias_ctrl) + afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0); + + value = afi_readl(pcie, AFI_PCIE_CONFIG); + value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; + value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar; + + list_for_each_entry(port, &pcie->ports, list) + value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); + + afi_writel(pcie, value, AFI_PCIE_CONFIG); + + value = afi_readl(pcie, AFI_FUSE); + + if (soc->has_gen2) + value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; + else + value |= AFI_FUSE_PCIE_T0_GEN2_DIS; + + afi_writel(pcie, value, AFI_FUSE); + +#ifndef CONFIG_TEGRA186 + if (pcie->phy) + err = tegra_xusb_phy_enable(pcie->phy); + else + err = tegra_pcie_phy_enable(pcie); + + if (err < 0) { + pr_err("failed to power on PHY: %d\n", err); + return err; + } +#endif + + /* take the PCIEXCLK logic out of reset */ +#ifdef CONFIG_TEGRA186 + err = reset_deassert(&pcie->reset_pcie_x); + if (err) { + pr_err("reset_deassert(pcie_x) failed: %d\n", err); + return err; + } +#else + reset_set_enable(PERIPH_ID_PCIEXCLK, 0); +#endif + + /* finally enable PCIe */ + value = afi_readl(pcie, AFI_CONFIGURATION); + value |= AFI_CONFIGURATION_EN_FPCI; + afi_writel(pcie, value, AFI_CONFIGURATION); + + /* disable all interrupts */ + afi_writel(pcie, 0, AFI_AFI_INTR_ENABLE); + afi_writel(pcie, 0, AFI_SM_INTR_ENABLE); + afi_writel(pcie, 0, AFI_INTR_MASK); + afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS); + + return 0; +} + +static int tegra_pcie_setup_translations(struct udevice *bus) +{ + struct tegra_pcie *pcie = dev_get_priv(bus); + unsigned long fpci, axi, size; + struct pci_region *io, *mem, *pref; + int count; + + /* BAR 0: type 1 extended configuration space */ + fpci = 0xfe100000; + size = resource_size(&pcie->cs); + axi = pcie->cs.start; + + afi_writel(pcie, axi, AFI_AXI_BAR0_START); + afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ); + afi_writel(pcie, fpci, AFI_FPCI_BAR0); + + count = pci_get_regions(bus, &io, &mem, &pref); + if (count != 3) + return -EINVAL; + + /* BAR 1: downstream I/O */ + fpci = 0xfdfc0000; + size = io->size; + axi = 
io->phys_start; + + afi_writel(pcie, axi, AFI_AXI_BAR1_START); + afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); + afi_writel(pcie, fpci, AFI_FPCI_BAR1); + + /* BAR 2: prefetchable memory */ + fpci = (((pref->phys_start >> 12) & 0x0fffffff) << 4) | 0x1; + size = pref->size; + axi = pref->phys_start; + + afi_writel(pcie, axi, AFI_AXI_BAR2_START); + afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ); + afi_writel(pcie, fpci, AFI_FPCI_BAR2); + + /* BAR 3: non-prefetchable memory */ + fpci = (((mem->phys_start >> 12) & 0x0fffffff) << 4) | 0x1; + size = mem->size; + axi = mem->phys_start; + + afi_writel(pcie, axi, AFI_AXI_BAR3_START); + afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ); + afi_writel(pcie, fpci, AFI_FPCI_BAR3); + + /* NULL out the remaining BARs as they are not used */ + afi_writel(pcie, 0, AFI_AXI_BAR4_START); + afi_writel(pcie, 0, AFI_AXI_BAR4_SZ); + afi_writel(pcie, 0, AFI_FPCI_BAR4); + + afi_writel(pcie, 0, AFI_AXI_BAR5_START); + afi_writel(pcie, 0, AFI_AXI_BAR5_SZ); + afi_writel(pcie, 0, AFI_FPCI_BAR5); + + /* map all upstream transactions as uncached */ + afi_writel(pcie, NV_PA_SDRAM_BASE, AFI_CACHE_BAR0_ST); + afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); + afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); + afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); + + /* MSI translations are setup only when needed */ + afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST); + afi_writel(pcie, 0, AFI_MSI_BAR_SZ); + afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST); + afi_writel(pcie, 0, AFI_MSI_BAR_SZ); + + return 0; +} + +static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) +{ + unsigned long ret = 0; + + switch (port->index) { + case 0: + ret = AFI_PEX0_CTRL; + break; + + case 1: + ret = AFI_PEX1_CTRL; + break; + + case 2: + ret = port->pcie->soc->afi_pex2_ctrl; + break; + } + + return ret; +} + +void tegra_pcie_port_reset(struct tegra_pcie_port *port) +{ + unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); + unsigned long value; + + /* pulse reset signel */ + value = afi_readl(port->pcie, ctrl); + value &= ~AFI_PEX_CTRL_RST; + afi_writel(port->pcie, value, ctrl); + + udelay(2000); + + value = afi_readl(port->pcie, ctrl); + value |= AFI_PEX_CTRL_RST; + afi_writel(port->pcie, value, ctrl); +} + +int tegra_pcie_port_index_of_port(struct tegra_pcie_port *port) +{ + return port->index; +} + +void __weak tegra_pcie_board_port_reset(struct tegra_pcie_port *port) +{ + tegra_pcie_port_reset(port); +} + +static void tegra_pcie_port_enable(struct tegra_pcie_port *port) +{ + struct tegra_pcie *pcie = port->pcie; + const struct tegra_pcie_soc *soc = pcie->soc; + unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); + unsigned long value; + + /* enable reference clock */ + value = afi_readl(pcie, ctrl); + value |= AFI_PEX_CTRL_REFCLK_EN; + + if (pcie->soc->has_pex_clkreq_en) + value |= AFI_PEX_CTRL_CLKREQ_EN; + + value |= AFI_PEX_CTRL_OVERRIDE_EN; + + afi_writel(pcie, value, ctrl); + + tegra_pcie_board_port_reset(port); + + if (soc->force_pca_enable) { + value = rp_readl(port, RP_VEND_CTL2); + value |= RP_VEND_CTL2_PCA_ENABLE; + rp_writel(port, value, RP_VEND_CTL2); + } + + /* configure the reference clock driver */ + pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); + if (soc->num_ports > 2) + pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); +} + +static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) +{ + unsigned int retries = 3; + unsigned long value; + + value = rp_readl(port, RP_PRIV_MISC); + value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT; + value |= 
RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT; + rp_writel(port, value, RP_PRIV_MISC); + + do { + unsigned int timeout = 200; + + do { + value = rp_readl(port, RP_VEND_XP); + if (value & RP_VEND_XP_DL_UP) + break; + + udelay(2000); + } while (--timeout); + + if (!timeout) { + debug("link %u down, retrying\n", port->index); + goto retry; + } + + timeout = 200; + + do { + value = rp_readl(port, RP_LINK_CONTROL_STATUS); + if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) + return true; + + udelay(2000); + } while (--timeout); + +retry: + tegra_pcie_board_port_reset(port); + } while (--retries); + + return false; +} + +static void tegra_pcie_port_disable(struct tegra_pcie_port *port) +{ + unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); + unsigned long value; + + /* assert port reset */ + value = afi_readl(port->pcie, ctrl); + value &= ~AFI_PEX_CTRL_RST; + afi_writel(port->pcie, value, ctrl); + + /* disable reference clock */ + value = afi_readl(port->pcie, ctrl); + value &= ~AFI_PEX_CTRL_REFCLK_EN; + afi_writel(port->pcie, value, ctrl); +} + +static void tegra_pcie_port_free(struct tegra_pcie_port *port) +{ + list_del(&port->list); + free(port); +} + +static int tegra_pcie_enable(struct tegra_pcie *pcie) +{ + struct tegra_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { + debug("probing port %u, using %u lanes\n", port->index, + port->num_lanes); + + tegra_pcie_port_enable(port); + + if (tegra_pcie_port_check_link(port)) + continue; + + debug("link %u down, ignoring\n", port->index); + + tegra_pcie_port_disable(port); + tegra_pcie_port_free(port); + } + + return 0; +} + +static const struct tegra_pcie_soc pci_tegra_soc[] = { + [TEGRA20_PCIE] = { + .num_ports = 2, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, + .pads_refclk_cfg0 = 0xfa5cfa5c, + .has_pex_clkreq_en = false, + .has_pex_bias_ctrl = false, + .has_cml_clk = false, + .has_gen2 = false, + }, + [TEGRA30_PCIE] = { + .num_ports = 3, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .afi_pex2_ctrl = AFI_PEX2_CTRL, + .pads_refclk_cfg0 = 0xfa5cfa5c, + .pads_refclk_cfg1 = 0xfa5cfa5c, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_cml_clk = true, + .has_gen2 = false, + }, + [TEGRA124_PCIE] = { + .num_ports = 2, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0x44ac44ac, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_cml_clk = true, + .has_gen2 = true, + }, + [TEGRA210_PCIE] = { + .num_ports = 2, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0x90b890b8, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_cml_clk = true, + .has_gen2 = true, + .force_pca_enable = true, + }, + [TEGRA186_PCIE] = { + .num_ports = 3, + .afi_pex2_ctrl = AFI_PEX2_CTRL_T186, + .pads_refclk_cfg0 = 0x80b880b8, + .pads_refclk_cfg1 = 0x000480b8, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_gen2 = true, + }, +}; + +static int pci_tegra_of_to_plat(struct udevice *dev) +{ + struct tegra_pcie *pcie = dev_get_priv(dev); + enum tegra_pci_id id; + + id = dev_get_driver_data(dev); + pcie->soc = &pci_tegra_soc[id]; + + INIT_LIST_HEAD(&pcie->ports); + + if (tegra_pcie_parse_dt(dev, id, pcie)) + return -EINVAL; + + return 0; +} + +static int pci_tegra_probe(struct udevice *dev) +{ + struct tegra_pcie *pcie = dev_get_priv(dev); + int err; + +#ifdef CONFIG_TEGRA186 + err = 
clk_get_by_name(dev, "afi", &pcie->clk_afi); + if (err) { + debug("clk_get_by_name(afi) failed: %d\n", err); + return err; + } + + err = clk_get_by_name(dev, "pex", &pcie->clk_pex); + if (err) { + debug("clk_get_by_name(pex) failed: %d\n", err); + return err; + } + + err = reset_get_by_name(dev, "afi", &pcie->reset_afi); + if (err) { + debug("reset_get_by_name(afi) failed: %d\n", err); + return err; + } + + err = reset_get_by_name(dev, "pex", &pcie->reset_pex); + if (err) { + debug("reset_get_by_name(pex) failed: %d\n", err); + return err; + } + + err = reset_get_by_name(dev, "pcie_x", &pcie->reset_pcie_x); + if (err) { + debug("reset_get_by_name(pcie_x) failed: %d\n", err); + return err; + } + + err = power_domain_get(dev, &pcie->pwrdom); + if (err) { + debug("power_domain_get() failed: %d\n", err); + return err; + } +#endif + + err = tegra_pcie_power_on(pcie); + if (err < 0) { + pr_err("failed to power on"); + return err; + } + + err = tegra_pcie_enable_controller(pcie); + if (err < 0) { + pr_err("failed to enable controller"); + return err; + } + + err = tegra_pcie_setup_translations(dev); + if (err < 0) { + pr_err("failed to decode ranges"); + return err; + } + + err = tegra_pcie_enable(pcie); + if (err < 0) { + pr_err("failed to enable PCIe"); + return err; + } + + return 0; +} + +static const struct dm_pci_ops pci_tegra_ops = { + .read_config = pci_tegra_read_config, + .write_config = pci_tegra_write_config, +}; + +static const struct udevice_id pci_tegra_ids[] = { + { .compatible = "nvidia,tegra20-pcie", .data = TEGRA20_PCIE }, + { .compatible = "nvidia,tegra30-pcie", .data = TEGRA30_PCIE }, + { .compatible = "nvidia,tegra124-pcie", .data = TEGRA124_PCIE }, + { .compatible = "nvidia,tegra210-pcie", .data = TEGRA210_PCIE }, + { .compatible = "nvidia,tegra186-pcie", .data = TEGRA186_PCIE }, + { } +}; + +U_BOOT_DRIVER(pci_tegra) = { + .name = "pci_tegra", + .id = UCLASS_PCI, + .of_match = pci_tegra_ids, + .ops = &pci_tegra_ops, + .of_to_plat = pci_tegra_of_to_plat, + .probe = pci_tegra_probe, + .priv_auto = sizeof(struct tegra_pcie), +}; diff --git a/drivers/pci/pci_x86.c b/drivers/pci/pci_x86.c new file mode 100644 index 00000000000..ab76166451c --- /dev/null +++ b/drivers/pci/pci_x86.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2015 Google, Inc + */ + +#include <dm.h> +#include <pci.h> +#include <asm/pci.h> + +static int _pci_x86_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_x86_read_config(bdf, offset, valuep, size); +} + +static int _pci_x86_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, enum pci_size_t size) +{ + return pci_x86_write_config(bdf, offset, value, size); +} + +static const struct dm_pci_ops pci_x86_ops = { + .read_config = _pci_x86_read_config, + .write_config = _pci_x86_write_config, +}; + +static const struct udevice_id pci_x86_ids[] = { + { .compatible = "pci-x86" }, + { } +}; + +U_BOOT_DRIVER(pci_x86) = { + .name = "pci_x86", + .id = UCLASS_PCI, + .of_match = pci_x86_ids, + .ops = &pci_x86_ops, +}; diff --git a/drivers/pci/pcie-xilinx-nwl.c b/drivers/pci/pcie-xilinx-nwl.c new file mode 100644 index 00000000000..7ef2bdf57b5 --- /dev/null +++ b/drivers/pci/pcie-xilinx-nwl.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host bridge driver for Xilinx / AMD ZynqMP NWL PCIe Bridge + * + * Based on the Linux driver which is: + * (C) Copyright 2014 - 2015, Xilinx, Inc. 
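+ *
+ * The bridge ("breg") and ECAM ("cfg") windows are taken from the device
+ * tree and configuration cycles are issued through the generic ECAM
+ * mapping helpers.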
+ * + * Author: Stefan Roese <sr@denx.de> + */ + +#include <clk.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <dm/devres.h> +#include <mapmem.h> +#include <pci.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/ioport.h> + +/* Bridge core config registers */ +#define BRCFG_PCIE_RX0 0x00000000 +#define BRCFG_PCIE_RX1 0x00000004 +#define BRCFG_INTERRUPT 0x00000010 +#define BRCFG_PCIE_RX_MSG_FILTER 0x00000020 + +/* Egress - Bridge translation registers */ +#define E_BREG_CAPABILITIES 0x00000200 +#define E_BREG_CONTROL 0x00000208 +#define E_BREG_BASE_LO 0x00000210 +#define E_BREG_BASE_HI 0x00000214 +#define E_ECAM_CAPABILITIES 0x00000220 +#define E_ECAM_CONTROL 0x00000228 +#define E_ECAM_BASE_LO 0x00000230 +#define E_ECAM_BASE_HI 0x00000234 + +#define I_ISUB_CONTROL 0x000003E8 +#define SET_ISUB_CONTROL BIT(0) +/* Rxed msg fifo - Interrupt status registers */ +#define MSGF_MISC_STATUS 0x00000400 +#define MSGF_MISC_MASK 0x00000404 +#define MSGF_LEG_STATUS 0x00000420 +#define MSGF_LEG_MASK 0x00000424 +#define MSGF_MSI_STATUS_LO 0x00000440 +#define MSGF_MSI_STATUS_HI 0x00000444 +#define MSGF_MSI_MASK_LO 0x00000448 +#define MSGF_MSI_MASK_HI 0x0000044C + +/* Msg filter mask bits */ +#define CFG_ENABLE_PM_MSG_FWD BIT(1) +#define CFG_ENABLE_INT_MSG_FWD BIT(2) +#define CFG_ENABLE_ERR_MSG_FWD BIT(3) +#define CFG_ENABLE_MSG_FILTER_MASK (CFG_ENABLE_PM_MSG_FWD | \ + CFG_ENABLE_INT_MSG_FWD | \ + CFG_ENABLE_ERR_MSG_FWD) + +/* Misc interrupt status mask bits */ +#define MSGF_MISC_SR_RXMSG_AVAIL BIT(0) +#define MSGF_MISC_SR_RXMSG_OVER BIT(1) +#define MSGF_MISC_SR_SLAVE_ERR BIT(4) +#define MSGF_MISC_SR_MASTER_ERR BIT(5) +#define MSGF_MISC_SR_I_ADDR_ERR BIT(6) +#define MSGF_MISC_SR_E_ADDR_ERR BIT(7) +#define MSGF_MISC_SR_FATAL_AER BIT(16) +#define MSGF_MISC_SR_NON_FATAL_AER BIT(17) +#define MSGF_MISC_SR_CORR_AER BIT(18) +#define MSGF_MISC_SR_UR_DETECT BIT(20) +#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) +#define MSGF_MISC_SR_FATAL_DEV BIT(23) +#define MSGF_MISC_SR_LINK_DOWN BIT(24) +#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) +#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) + +#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ + MSGF_MISC_SR_RXMSG_OVER | \ + MSGF_MISC_SR_SLAVE_ERR | \ + MSGF_MISC_SR_MASTER_ERR | \ + MSGF_MISC_SR_I_ADDR_ERR | \ + MSGF_MISC_SR_E_ADDR_ERR | \ + MSGF_MISC_SR_FATAL_AER | \ + MSGF_MISC_SR_NON_FATAL_AER | \ + MSGF_MISC_SR_CORR_AER | \ + MSGF_MISC_SR_UR_DETECT | \ + MSGF_MISC_SR_NON_FATAL_DEV | \ + MSGF_MISC_SR_FATAL_DEV | \ + MSGF_MISC_SR_LINK_DOWN | \ + MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ + MSGF_MSIC_SR_LINK_BWIDTH) + +/* Legacy interrupt status mask bits */ +#define MSGF_LEG_SR_INTA BIT(0) +#define MSGF_LEG_SR_INTB BIT(1) +#define MSGF_LEG_SR_INTC BIT(2) +#define MSGF_LEG_SR_INTD BIT(3) +#define MSGF_LEG_SR_MASKALL (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \ + MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD) + +/* MSI interrupt status mask bits */ +#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0) +#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0) + +/* Bridge config interrupt mask */ +#define BRCFG_INTERRUPT_MASK BIT(0) +#define BREG_PRESENT BIT(0) +#define BREG_ENABLE BIT(0) +#define BREG_ENABLE_FORCE BIT(1) + +/* E_ECAM status mask bits */ +#define E_ECAM_PRESENT BIT(0) +#define E_ECAM_CR_ENABLE BIT(0) +#define E_ECAM_SIZE_LOC GENMASK(20, 16) +#define E_ECAM_SIZE_SHIFT 16 +#define NWL_ECAM_VALUE_DEFAULT 12 + +#define CFG_DMA_REG_BAR GENMASK(2, 0) +#define CFG_PCIE_CACHE GENMASK(7, 0) + +/* Readin the PS_LINKUP */ +#define PS_LINKUP_OFFSET 0x00000238 +#define 
PCIE_PHY_LINKUP_BIT BIT(0) +#define PHY_RDY_LINKUP_BIT BIT(1) + +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +struct nwl_pcie { + struct udevice *dev; + void __iomem *breg_base; + void __iomem *pcireg_base; + void __iomem *ecam_base; + phys_addr_t phys_breg_base; /* Physical Bridge Register Base */ + phys_addr_t phys_ecam_base; /* Physical Configuration Base */ + u32 ecam_value; +}; + +static int nwl_pcie_config_address(const struct udevice *bus, + pci_dev_t bdf, uint offset, + void **paddress) +{ + struct nwl_pcie *pcie = dev_get_priv(bus); + void *addr; + + addr = pcie->ecam_base; + addr += PCIE_ECAM_OFFSET(PCI_BUS(bdf) - dev_seq(bus), + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + *paddress = addr; + + return 0; +} + +static int nwl_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_generic_mmap_read_config(bus, nwl_pcie_config_address, + bdf, offset, valuep, size); +} + +static int nwl_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + return pci_generic_mmap_write_config(bus, nwl_pcie_config_address, + bdf, offset, value, size); +} + +static const struct dm_pci_ops nwl_pcie_ops = { + .read_config = nwl_pcie_read_config, + .write_config = nwl_pcie_write_config, +}; + +static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) +{ + return readl(pcie->breg_base + off); +} + +static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off) +{ + writel(val, pcie->breg_base + off); +} + +static bool nwl_pcie_link_up(struct nwl_pcie *pcie) +{ + if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT) + return true; + return false; +} + +static bool nwl_phy_link_up(struct nwl_pcie *pcie) +{ + if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT) + return true; + return false; +} + +static int nwl_wait_for_link(struct nwl_pcie *pcie) +{ + struct udevice *dev = pcie->dev; + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (nwl_phy_link_up(pcie)) + return 0; + udelay(LINK_WAIT_USLEEP_MIN); + } + + dev_warn(dev, "PHY link never came up\n"); + return -ETIMEDOUT; +} + +static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) +{ + struct udevice *dev = pcie->dev; + u32 breg_val, ecam_val; + int err; + + breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT; + if (!breg_val) { + dev_err(dev, "BREG is not present\n"); + return breg_val; + } + + /* Write bridge_off to breg base */ + nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base), + E_BREG_BASE_LO); + nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base), + E_BREG_BASE_HI); + + /* Enable BREG */ + nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE, + E_BREG_CONTROL); + + /* Disable DMA channel registers */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) | + CFG_DMA_REG_BAR, BRCFG_PCIE_RX0); + + /* Enable Ingress subtractive decode translation */ + nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL); + + /* Enable msg filtering details */ + nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK, + BRCFG_PCIE_RX_MSG_FILTER); + + err = nwl_wait_for_link(pcie); + if (err) + return err; + + ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT; + if (!ecam_val) { + dev_err(dev, "ECAM is not present\n"); + return 
ecam_val; + } + + /* Enable ECAM */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | + E_ECAM_CR_ENABLE, E_ECAM_CONTROL); + + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | + (pcie->ecam_value << E_ECAM_SIZE_SHIFT), + E_ECAM_CONTROL); + + nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base), + E_ECAM_BASE_LO); + nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base), + E_ECAM_BASE_HI); + + if (nwl_pcie_link_up(pcie)) + dev_info(dev, "Link is UP\n"); + else + dev_info(dev, "Link is DOWN\n"); + + /* Disable all misc interrupts */ + nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); + + /* Clear pending misc interrupts */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & + MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS); + + /* Disable all legacy interrupts */ + nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); + + /* Clear pending legacy interrupts */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & + MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS); + + return 0; +} + +static int nwl_pcie_parse_dt(struct nwl_pcie *pcie) +{ + struct udevice *dev = pcie->dev; + struct resource res; + int ret; + + ret = dev_read_resource_byname(dev, "breg", &res); + if (ret) + return ret; + pcie->breg_base = devm_ioremap(dev, res.start, resource_size(&res)); + if (IS_ERR(pcie->breg_base)) + return PTR_ERR(pcie->breg_base); + pcie->phys_breg_base = res.start; + + ret = dev_read_resource_byname(dev, "cfg", &res); + if (ret) + return ret; + pcie->ecam_base = devm_ioremap(dev, res.start, resource_size(&res)); + if (IS_ERR(pcie->ecam_base)) + return PTR_ERR(pcie->ecam_base); + pcie->phys_ecam_base = res.start; + + return 0; +} + +static int nwl_pcie_probe(struct udevice *dev) +{ + struct nwl_pcie *pcie = dev_get_priv(dev); + int err; + + pcie->dev = dev; + pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; + + err = nwl_pcie_parse_dt(pcie); + if (err) { + dev_err(dev, "Parsing DT failed\n"); + return err; + } + + err = nwl_pcie_bridge_init(pcie); + if (err) { + dev_err(dev, "HW Initialization failed\n"); + return err; + } + + return 0; +} + +static const struct udevice_id nwl_pcie_of_match[] = { + { .compatible = "xlnx,nwl-pcie-2.11", }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(nwl_pcie) = { + .name = "nwl-pcie", + .id = UCLASS_PCI, + .of_match = nwl_pcie_of_match, + .probe = nwl_pcie_probe, + .priv_auto = sizeof(struct nwl_pcie), + .ops = &nwl_pcie_ops, +}; diff --git a/drivers/pci/pcie_apple.c b/drivers/pci/pcie_apple.c new file mode 100644 index 00000000000..6a8e715d4b6 --- /dev/null +++ b/drivers/pci/pcie_apple.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host bridge driver for Apple system-on-chips. + * + * The HW is ECAM compliant. + * + * Initialization requires enabling power and clocks, along with a + * number of register pokes. 
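+ *
+ * Each enabled root port found in the device tree is brought up in turn:
+ * its reference clocks are requested, PERST# is released via GPIO and
+ * link training is started, tolerating ports whose link never comes up.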
+ * + * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> + * Copyright (C) 2021 Google LLC + * Copyright (C) 2021 Corellium LLC + * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org> + * + * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io> + * Author: Marc Zyngier <maz@kernel.org> + */ + +#include <dm.h> +#include <dm/device_compat.h> +#include <dm/devres.h> +#include <mapmem.h> +#include <pci.h> +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <linux/delay.h> +#include <linux/iopoll.h> + +#define CORE_RC_PHYIF_CTL 0x00024 +#define CORE_RC_PHYIF_CTL_RUN BIT(0) +#define CORE_RC_PHYIF_STAT 0x00028 +#define CORE_RC_PHYIF_STAT_REFCLK BIT(4) +#define CORE_RC_CTL 0x00050 +#define CORE_RC_CTL_RUN BIT(0) +#define CORE_RC_STAT 0x00058 +#define CORE_RC_STAT_READY BIT(0) +#define CORE_FABRIC_STAT 0x04000 +#define CORE_FABRIC_STAT_MASK 0x001F001F + +#define CORE_PHY_DEFAULT_BASE(port) (0x84000 + 0x4000 * (port)) + +#define PHY_LANE_CFG 0x00000 +#define PHY_LANE_CFG_REFCLK0REQ BIT(0) +#define PHY_LANE_CFG_REFCLK1REQ BIT(1) +#define PHY_LANE_CFG_REFCLK0ACK BIT(2) +#define PHY_LANE_CFG_REFCLK1ACK BIT(3) +#define PHY_LANE_CFG_REFCLKEN (BIT(9) | BIT(10)) +#define PHY_LANE_CFG_REFCLKCGEN (BIT(30) | BIT(31)) +#define PHY_LANE_CTL 0x00004 +#define PHY_LANE_CTL_CFGACC BIT(15) + +#define PORT_LTSSMCTL 0x00080 +#define PORT_LTSSMCTL_START BIT(0) +#define PORT_INTSTAT 0x00100 +#define PORT_INT_TUNNEL_ERR 31 +#define PORT_INT_CPL_TIMEOUT 23 +#define PORT_INT_RID2SID_MAPERR 22 +#define PORT_INT_CPL_ABORT 21 +#define PORT_INT_MSI_BAD_DATA 19 +#define PORT_INT_MSI_ERR 18 +#define PORT_INT_REQADDR_GT32 17 +#define PORT_INT_AF_TIMEOUT 15 +#define PORT_INT_LINK_DOWN 14 +#define PORT_INT_LINK_UP 12 +#define PORT_INT_LINK_BWMGMT 11 +#define PORT_INT_AER_MASK (15 << 4) +#define PORT_INT_PORT_ERR 4 +#define PORT_INT_INTx(i) i +#define PORT_INT_INTx_MASK 15 +#define PORT_INTMSK 0x00104 +#define PORT_INTMSKSET 0x00108 +#define PORT_INTMSKCLR 0x0010c +#define PORT_MSICFG 0x00124 +#define PORT_MSICFG_EN BIT(0) +#define PORT_MSICFG_L2MSINUM_SHIFT 4 +#define PORT_MSIBASE 0x00128 +#define PORT_MSIBASE_1_SHIFT 16 +#define PORT_MSIADDR 0x00168 +#define PORT_LINKSTS 0x00208 +#define PORT_LINKSTS_UP BIT(0) +#define PORT_LINKSTS_BUSY BIT(2) +#define PORT_LINKCMDSTS 0x00210 +#define PORT_OUTS_NPREQS 0x00284 +#define PORT_OUTS_NPREQS_REQ BIT(24) +#define PORT_OUTS_NPREQS_CPL BIT(16) +#define PORT_RXWR_FIFO 0x00288 +#define PORT_RXWR_FIFO_HDR GENMASK(15, 10) +#define PORT_RXWR_FIFO_DATA GENMASK(9, 0) +#define PORT_RXRD_FIFO 0x0028C +#define PORT_RXRD_FIFO_REQ GENMASK(6, 0) +#define PORT_OUTS_CPLS 0x00290 +#define PORT_OUTS_CPLS_SHRD GENMASK(14, 8) +#define PORT_OUTS_CPLS_WAIT GENMASK(6, 0) +#define PORT_APPCLK 0x00800 +#define PORT_APPCLK_EN BIT(0) +#define PORT_APPCLK_CGDIS BIT(8) +#define PORT_STATUS 0x00804 +#define PORT_STATUS_READY BIT(0) +#define PORT_REFCLK 0x00810 +#define PORT_REFCLK_EN BIT(0) +#define PORT_REFCLK_CGDIS BIT(8) +#define PORT_PERST 0x00814 +#define PORT_PERST_OFF BIT(0) +#define PORT_RID2SID(i16) (0x00828 + 4 * (i16)) +#define PORT_RID2SID_VALID BIT(31) +#define PORT_RID2SID_SID_SHIFT 16 +#define PORT_RID2SID_BUS_SHIFT 8 +#define PORT_RID2SID_DEV_SHIFT 3 +#define PORT_RID2SID_FUNC_SHIFT 0 +#define PORT_OUTS_PREQS_HDR 0x00980 +#define PORT_OUTS_PREQS_HDR_MASK GENMASK(9, 0) +#define PORT_OUTS_PREQS_DATA 0x00984 +#define PORT_OUTS_PREQS_DATA_MASK GENMASK(15, 0) +#define PORT_TUNCTRL 0x00988 +#define PORT_TUNCTRL_PERST_ON BIT(0) +#define PORT_TUNCTRL_PERST_ACK_REQ BIT(1) +#define 
PORT_TUNSTAT 0x0098c +#define PORT_TUNSTAT_PERST_ON BIT(0) +#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1) +#define PORT_PREFMEM_ENABLE 0x00994 + +struct reg_info { + u32 phy_lane_ctl; + u32 port_refclk; + u32 port_perst; +}; + +const struct reg_info t8103_hw = { + .phy_lane_ctl = PHY_LANE_CTL, + .port_refclk = PORT_REFCLK, + .port_perst = PORT_PERST, +}; + +#define PORT_T602X_PERST 0x082c + +const struct reg_info t602x_hw = { + .phy_lane_ctl = 0, + .port_refclk = 0, + .port_perst = PORT_T602X_PERST, +}; + +struct apple_pcie_priv { + struct udevice *dev; + void __iomem *base; + void __iomem *cfg_base; + struct list_head ports; + const struct reg_info *hw; +}; + +struct apple_pcie_port { + struct apple_pcie_priv *pcie; + struct gpio_desc reset; + ofnode np; + void __iomem *base; + void __iomem *phy; + struct list_head entry; + int idx; +}; + +static void rmw_set(u32 set, void __iomem *addr) +{ + writel_relaxed(readl_relaxed(addr) | set, addr); +} + +static void rmw_clear(u32 clr, void __iomem *addr) +{ + writel_relaxed(readl_relaxed(addr) & ~clr, addr); +} + +static int apple_pcie_config_address(const struct udevice *bus, + pci_dev_t bdf, uint offset, + void **paddress) +{ + struct apple_pcie_priv *pcie = dev_get_priv(bus); + void *addr; + + addr = pcie->cfg_base; + addr += PCIE_ECAM_OFFSET(PCI_BUS(bdf), PCI_DEV(bdf), + PCI_FUNC(bdf), offset); + *paddress = addr; + + return 0; +} + +static int apple_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + int ret; + + ret = pci_generic_mmap_read_config(bus, apple_pcie_config_address, + bdf, offset, valuep, size); + return ret; +} + +static int apple_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + return pci_generic_mmap_write_config(bus, apple_pcie_config_address, + bdf, offset, value, size); +} + +static const struct dm_pci_ops apple_pcie_ops = { + .read_config = apple_pcie_read_config, + .write_config = apple_pcie_write_config, +}; + +static int apple_pcie_setup_refclk(struct apple_pcie_priv *pcie, + struct apple_pcie_port *port) +{ + u32 stat; + int res; + + if (pcie->hw->phy_lane_ctl) + rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl); + + rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG); + + res = readl_poll_sleep_timeout(port->phy + PHY_LANE_CFG, + stat, stat & PHY_LANE_CFG_REFCLK0ACK, + 100, 50000); + if (res < 0) + return res; + + rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG); + res = readl_poll_sleep_timeout(port->phy + PHY_LANE_CFG, + stat, stat & PHY_LANE_CFG_REFCLK1ACK, + 100, 50000); + + if (res < 0) + return res; + + if (pcie->hw->phy_lane_ctl) + rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl); + + rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG); + + if (pcie->hw->port_refclk) + rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk); + + return 0; +} + +static int apple_pcie_setup_port(struct apple_pcie_priv *pcie, ofnode np) +{ + struct apple_pcie_port *port; + struct gpio_desc reset; + fdt_addr_t addr; + u32 stat, idx; + int ret; + char name[16]; + + ret = gpio_request_by_name_nodev(np, "reset-gpios", 0, &reset, 0); + if (ret) + return ret; + + port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + ret = ofnode_read_u32_index(np, "reg", 0, &idx); + if (ret) + return ret; + + /* Use the first reg entry to work out the port index */ + port->idx = idx >> 11; + port->pcie = pcie; + port->reset = reset; 
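+	/* keep the reset GPIO so it can be released again in remove() */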
+ port->np = np; + + snprintf(name, sizeof(name), "port%d", port->idx); + addr = dev_read_addr_name(pcie->dev, name); + if (addr == FDT_ADDR_T_NONE) + addr = dev_read_addr_index(pcie->dev, port->idx + 2); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + port->base = map_sysmem(addr, 0); + + snprintf(name, sizeof(name), "phy%d", port->idx); + addr = dev_read_addr_name(pcie->dev, name); + if (addr == FDT_ADDR_T_NONE) + port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx); + else + port->phy = map_sysmem(addr, 0); + + rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK); + + /* Assert PERST# before setting up the clock */ + dm_gpio_set_value(&reset, 1); + + ret = apple_pcie_setup_refclk(pcie, port); + if (ret < 0) + return ret; + + /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */ + udelay(100); + + /* Deassert PERST# */ + rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst); + dm_gpio_set_value(&reset, 0); + + /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ + udelay(100 * 1000); + + ret = readl_poll_sleep_timeout(port->base + PORT_STATUS, stat, + stat & PORT_STATUS_READY, 100, 250000); + if (ret < 0) { + dev_err(pcie->dev, "port %d ready wait timeout\n", port->idx); + return ret; + } + + list_add_tail(&port->entry, &pcie->ports); + + writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL); + + /* + * Deliberately ignore the link not coming up as connected + * devices (e.g. the WiFi controller) may not be powerd up. + */ + readl_poll_sleep_timeout(port->base + PORT_LINKSTS, stat, + (stat & PORT_LINKSTS_UP), 100, 100000); + + if (pcie->hw->port_refclk) + rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK); + else + rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG); + rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK); + + return 0; +} + +static int apple_pcie_probe(struct udevice *dev) +{ + struct apple_pcie_priv *pcie = dev_get_priv(dev); + fdt_addr_t addr; + ofnode of_port; + int i, ret; + + pcie->hw = (struct reg_info *)dev_get_driver_data(dev); + + pcie->dev = dev; + addr = dev_read_addr_index(dev, 0); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + pcie->cfg_base = map_sysmem(addr, 0); + + addr = dev_read_addr_index(dev, 1); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + pcie->base = map_sysmem(addr, 0); + + INIT_LIST_HEAD(&pcie->ports); + + for (of_port = ofnode_first_subnode(dev_ofnode(dev)); + ofnode_valid(of_port); + of_port = ofnode_next_subnode(of_port)) { + if (!ofnode_is_enabled(of_port)) + continue; + ret = apple_pcie_setup_port(pcie, of_port); + if (ret) { + dev_err(pcie->dev, "Port %d setup fail: %d\n", i, ret); + return ret; + } + } + + return 0; +} + +static int apple_pcie_remove(struct udevice *dev) +{ + struct apple_pcie_priv *pcie = dev_get_priv(dev); + struct apple_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &pcie->ports, entry) { + gpio_free_list_nodev(&port->reset, 1); + free(port); + } + + return 0; +} + +static const struct udevice_id apple_pcie_of_match[] = { + { .compatible = "apple,t6020-pcie", .data = (ulong)&t602x_hw }, + { .compatible = "apple,pcie", .data = (ulong)&t8103_hw }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(apple_pcie) = { + .name = "apple_pcie", + .id = UCLASS_PCI, + .of_match = apple_pcie_of_match, + .probe = apple_pcie_probe, + .remove = apple_pcie_remove, + .priv_auto = sizeof(struct apple_pcie_priv), + .ops = &apple_pcie_ops, +}; diff --git a/drivers/pci/pcie_brcmstb.c b/drivers/pci/pcie_brcmstb.c new file mode 100644 index 00000000000..f978c64365c --- 
/dev/null +++ b/drivers/pci/pcie_brcmstb.c @@ -0,0 +1,653 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Broadcom STB PCIe controller driver + * + * Copyright (C) 2020 Samsung Electronics Co., Ltd. + * + * Based on upstream Linux kernel driver: + * drivers/pci/controller/pcie-brcmstb.c + * Copyright (C) 2009 - 2017 Broadcom + * + * Based driver by Nicolas Saenz Julienne + * Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de> + */ + +#include <errno.h> +#include <dm.h> +#include <dm/ofnode.h> +#include <pci.h> +#include <asm/io.h> +#include <linux/bitfield.h> +#include <linux/log2.h> +#include <linux/iopoll.h> + +/* Offset of the mandatory PCIe capability config registers */ +#define BRCM_PCIE_CAP_REGS 0x00ac + +/* The PCIe controller register offsets */ +#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1 0x0188 +#define VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc +#define VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0 + +#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c +#define CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff + +#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc +#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 + +#define PCIE_RC_DL_MDIO_ADDR 0x1100 +#define PCIE_RC_DL_MDIO_WR_DATA 0x1104 +#define PCIE_RC_DL_MDIO_RD_DATA 0x1108 + +#define PCIE_MISC_MISC_CTRL 0x4008 +#define MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000 +#define MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000 +#define MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000 +#define MISC_CTRL_MAX_BURST_SIZE_128 0x0 +#define MISC_CTRL_SCB0_SIZE_MASK 0xf8000000 + +#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c +#define PCIE_MEM_WIN0_LO(win) \ + PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 4) + +#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI 0x4010 +#define PCIE_MEM_WIN0_HI(win) \ + PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 4) + +#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c +#define RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f + +#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034 +#define RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f +#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038 + +#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c +#define RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f + +#define PCIE_MISC_PCIE_STATUS 0x4068 +#define STATUS_PCIE_PORT_MASK 0x80 +#define STATUS_PCIE_PORT_SHIFT 7 +#define STATUS_PCIE_DL_ACTIVE_MASK 0x20 +#define STATUS_PCIE_DL_ACTIVE_SHIFT 5 +#define STATUS_PCIE_PHYLINKUP_MASK 0x10 +#define STATUS_PCIE_PHYLINKUP_SHIFT 4 + +#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070 +#define MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000 +#define MEM_WIN0_BASE_LIMIT_BASE_MASK 0xfff0 +#define MEM_WIN0_BASE_LIMIT_BASE_HI_SHIFT 12 +#define PCIE_MEM_WIN0_BASE_LIMIT(win) \ + PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4) + +#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI 0x4080 +#define MEM_WIN0_BASE_HI_BASE_MASK 0xff +#define PCIE_MEM_WIN0_BASE_HI(win) \ + PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8) + +#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI 0x4084 +#define PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK 0xff +#define PCIE_MEM_WIN0_LIMIT_HI(win) \ + PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8) + +#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204 +#define PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000 + +#define PCIE_MSI_INTR2_CLR 0x4508 +#define PCIE_MSI_INTR2_MASK_SET 0x4510 + +#define PCIE_EXT_CFG_DATA 0x8000 + +#define PCIE_EXT_CFG_INDEX 0x9000 + +#define PCIE_RGR1_SW_INIT_1 0x9210 +#define RGR1_SW_INIT_1_PERST_MASK 0x1 +#define RGR1_SW_INIT_1_INIT_MASK 0x2 + +/* PCIe parameters */ +#define BRCM_NUM_PCIE_OUT_WINS 4 + +/* MDIO registers */ +#define MDIO_PORT0 0x0 +#define 
MDIO_DATA_MASK 0x7fffffff +#define MDIO_DATA_SHIFT 0 +#define MDIO_PORT_MASK 0xf0000 +#define MDIO_PORT_SHIFT 16 +#define MDIO_REGAD_MASK 0xffff +#define MDIO_REGAD_SHIFT 0 +#define MDIO_CMD_MASK 0xfff00000 +#define MDIO_CMD_SHIFT 20 +#define MDIO_CMD_READ 0x1 +#define MDIO_CMD_WRITE 0x0 +#define MDIO_DATA_DONE_MASK 0x80000000 +#define SSC_REGS_ADDR 0x1100 +#define SET_ADDR_OFFSET 0x1f +#define SSC_CNTL_OFFSET 0x2 +#define SSC_CNTL_OVRD_EN_MASK 0x8000 +#define SSC_CNTL_OVRD_VAL_MASK 0x4000 +#define SSC_STATUS_OFFSET 0x1 +#define SSC_STATUS_SSC_MASK 0x400 +#define SSC_STATUS_SSC_SHIFT 10 +#define SSC_STATUS_PLL_LOCK_MASK 0x800 +#define SSC_STATUS_PLL_LOCK_SHIFT 11 + +/** + * struct brcm_pcie - the PCIe controller state + * @base: Base address of memory mapped IO registers of the controller + * @gen: Non-zero value indicates limitation of the PCIe controller operation + * to a specific generation (1, 2 or 3) + * @ssc: true indicates active Spread Spectrum Clocking operation + */ +struct brcm_pcie { + void __iomem *base; + + int gen; + bool ssc; +}; + +/** + * brcm_pcie_encode_ibar_size() - Encode the inbound "BAR" region size + * @size: The inbound region size + * + * This function converts size of the inbound "BAR" region to the non-linear + * values of the PCIE_MISC_RC_BAR[123]_CONFIG_LO register SIZE field. + * + * Return: The encoded inbound region size + */ +static int brcm_pcie_encode_ibar_size(u64 size) +{ + int log2_in = ilog2(size); + + if (log2_in >= 12 && log2_in <= 15) + /* Covers 4KB to 32KB (inclusive) */ + return (log2_in - 12) + 0x1c; + else if (log2_in >= 16 && log2_in <= 37) + /* Covers 64KB to 32GB, (inclusive) */ + return log2_in - 15; + + /* Something is awry so disable */ + return 0; +} + +/** + * brcm_pcie_rc_mode() - Check if PCIe controller is in RC mode + * @pcie: Pointer to the PCIe controller state + * + * The controller is capable of serving in both RC and EP roles. + * + * Return: true for RC mode, false for EP mode. + */ +static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie) +{ + u32 val; + + val = readl(pcie->base + PCIE_MISC_PCIE_STATUS); + + return (val & STATUS_PCIE_PORT_MASK) >> STATUS_PCIE_PORT_SHIFT; +} + +/** + * brcm_pcie_link_up() - Check whether the PCIe link is up + * @pcie: Pointer to the PCIe controller state + * + * Return: true if the link is up, false otherwise. 
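+ * The link is only reported up when both the PHY link-up and the data
+ * link layer active status bits are set.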
+ */ +static bool brcm_pcie_link_up(struct brcm_pcie *pcie) +{ + u32 val, dla, plu; + + val = readl(pcie->base + PCIE_MISC_PCIE_STATUS); + dla = (val & STATUS_PCIE_DL_ACTIVE_MASK) >> STATUS_PCIE_DL_ACTIVE_SHIFT; + plu = (val & STATUS_PCIE_PHYLINKUP_MASK) >> STATUS_PCIE_PHYLINKUP_SHIFT; + + return dla && plu; +} + +static int brcm_pcie_config_address(const struct udevice *dev, pci_dev_t bdf, + uint offset, void **paddress) +{ + struct brcm_pcie *pcie = dev_get_priv(dev); + unsigned int pci_bus = PCI_BUS(bdf); + unsigned int pci_dev = PCI_DEV(bdf); + unsigned int pci_func = PCI_FUNC(bdf); + int idx; + + /* + * Busses 0 (host PCIe bridge) and 1 (its immediate child) + * are limited to a single device each + */ + if (pci_bus < 2 && pci_dev > 0) + return -EINVAL; + + /* Accesses to the RC go right to the RC registers */ + if (pci_bus == 0) { + *paddress = pcie->base + offset; + return 0; + } + + /* An access to our HW w/o link-up will cause a CPU Abort */ + if (!brcm_pcie_link_up(pcie)) + return -EINVAL; + + /* For devices, write to the config space index register */ + idx = PCIE_ECAM_OFFSET(pci_bus, pci_dev, pci_func, 0); + + writel(idx, pcie->base + PCIE_EXT_CFG_INDEX); + *paddress = pcie->base + PCIE_EXT_CFG_DATA + offset; + + return 0; +} + +static int brcm_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_generic_mmap_read_config(bus, brcm_pcie_config_address, + bdf, offset, valuep, size); +} + +static int brcm_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + return pci_generic_mmap_write_config(bus, brcm_pcie_config_address, + bdf, offset, value, size); +} + +static const char *link_speed_to_str(unsigned int cls) +{ + switch (cls) { + case PCI_EXP_LNKSTA_CLS_2_5GB: return "2.5"; + case PCI_EXP_LNKSTA_CLS_5_0GB: return "5.0"; + case PCI_EXP_LNKSTA_CLS_8_0GB: return "8.0"; + default: + break; + } + + return "??"; +} + +static u32 brcm_pcie_mdio_form_pkt(unsigned int port, unsigned int regad, + unsigned int cmd) +{ + u32 pkt; + + pkt = (port << MDIO_PORT_SHIFT) & MDIO_PORT_MASK; + pkt |= (regad << MDIO_REGAD_SHIFT) & MDIO_REGAD_MASK; + pkt |= (cmd << MDIO_CMD_SHIFT) & MDIO_CMD_MASK; + + return pkt; +} + +/** + * brcm_pcie_mdio_read() - Perform a register read on the internal MDIO bus + * @base: Pointer to the PCIe controller IO registers + * @port: The MDIO port number + * @regad: The register address + * @val: A pointer at which to store the read value + * + * Return: 0 on success and register value in @val, negative error value + * on failure. + */ +static int brcm_pcie_mdio_read(void __iomem *base, unsigned int port, + unsigned int regad, u32 *val) +{ + u32 data, addr; + int ret; + + addr = brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ); + writel(addr, base + PCIE_RC_DL_MDIO_ADDR); + readl(base + PCIE_RC_DL_MDIO_ADDR); + + ret = readl_poll_timeout(base + PCIE_RC_DL_MDIO_RD_DATA, data, + (data & MDIO_DATA_DONE_MASK), 100); + + *val = data & MDIO_DATA_MASK; + + return ret; +} + +/** + * brcm_pcie_mdio_write() - Perform a register write on the internal MDIO bus + * @base: Pointer to the PCIe controller IO registers + * @port: The MDIO port number + * @regad: Address of the register + * @wrdata: The value to write + * + * Return: 0 on success, negative error value on failure. 
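+ * The data is posted with the DONE bit set and the transfer has completed
+ * once the controller clears that bit.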
+ */ +static int brcm_pcie_mdio_write(void __iomem *base, unsigned int port, + unsigned int regad, u16 wrdata) +{ + u32 data, addr; + + addr = brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE); + writel(addr, base + PCIE_RC_DL_MDIO_ADDR); + readl(base + PCIE_RC_DL_MDIO_ADDR); + writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA); + + return readl_poll_timeout(base + PCIE_RC_DL_MDIO_WR_DATA, data, + !(data & MDIO_DATA_DONE_MASK), 100); +} + +/** + * brcm_pcie_set_ssc() - Configure the controller for Spread Spectrum Clocking + * @base: pointer to the PCIe controller IO registers + * + * Return: 0 on success, negative error value on failure. + */ +static int brcm_pcie_set_ssc(void __iomem *base) +{ + int pll, ssc; + int ret; + u32 tmp; + + ret = brcm_pcie_mdio_write(base, MDIO_PORT0, SET_ADDR_OFFSET, + SSC_REGS_ADDR); + if (ret < 0) + return ret; + + ret = brcm_pcie_mdio_read(base, MDIO_PORT0, SSC_CNTL_OFFSET, &tmp); + if (ret < 0) + return ret; + + tmp |= (SSC_CNTL_OVRD_EN_MASK | SSC_CNTL_OVRD_VAL_MASK); + + ret = brcm_pcie_mdio_write(base, MDIO_PORT0, SSC_CNTL_OFFSET, tmp); + if (ret < 0) + return ret; + + udelay(1000); + ret = brcm_pcie_mdio_read(base, MDIO_PORT0, SSC_STATUS_OFFSET, &tmp); + if (ret < 0) + return ret; + + ssc = (tmp & SSC_STATUS_SSC_MASK) >> SSC_STATUS_SSC_SHIFT; + pll = (tmp & SSC_STATUS_PLL_LOCK_MASK) >> SSC_STATUS_PLL_LOCK_SHIFT; + + return ssc && pll ? 0 : -EIO; +} + +/** + * brcm_pcie_set_gen() - Limits operation to a specific generation (1, 2 or 3) + * @pcie: pointer to the PCIe controller state + * @gen: PCIe generation to limit the controller's operation to + */ +static void brcm_pcie_set_gen(struct brcm_pcie *pcie, unsigned int gen) +{ + void __iomem *cap_base = pcie->base + BRCM_PCIE_CAP_REGS; + + u16 lnkctl2 = readw(cap_base + PCI_EXP_LNKCTL2); + u32 lnkcap = readl(cap_base + PCI_EXP_LNKCAP); + + lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; + writel(lnkcap, cap_base + PCI_EXP_LNKCAP); + + lnkctl2 = (lnkctl2 & ~0xf) | gen; + writew(lnkctl2, cap_base + PCI_EXP_LNKCTL2); +} + +static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie, + unsigned int win, u64 phys_addr, + u64 pcie_addr, u64 size) +{ + void __iomem *base = pcie->base; + u32 phys_addr_mb_high, limit_addr_mb_high; + phys_addr_t phys_addr_mb, limit_addr_mb; + int high_addr_shift; + u32 tmp; + + /* Set the base of the pcie_addr window */ + writel(lower_32_bits(pcie_addr), base + PCIE_MEM_WIN0_LO(win)); + writel(upper_32_bits(pcie_addr), base + PCIE_MEM_WIN0_HI(win)); + + /* Write the addr base & limit lower bits (in MBs) */ + phys_addr_mb = phys_addr / SZ_1M; + limit_addr_mb = (phys_addr + size - 1) / SZ_1M; + + tmp = readl(base + PCIE_MEM_WIN0_BASE_LIMIT(win)); + u32p_replace_bits(&tmp, phys_addr_mb, + MEM_WIN0_BASE_LIMIT_BASE_MASK); + u32p_replace_bits(&tmp, limit_addr_mb, + MEM_WIN0_BASE_LIMIT_LIMIT_MASK); + writel(tmp, base + PCIE_MEM_WIN0_BASE_LIMIT(win)); + + /* Write the cpu & limit addr upper bits */ + high_addr_shift = MEM_WIN0_BASE_LIMIT_BASE_HI_SHIFT; + phys_addr_mb_high = phys_addr_mb >> high_addr_shift; + tmp = readl(base + PCIE_MEM_WIN0_BASE_HI(win)); + u32p_replace_bits(&tmp, phys_addr_mb_high, + MEM_WIN0_BASE_HI_BASE_MASK); + writel(tmp, base + PCIE_MEM_WIN0_BASE_HI(win)); + + limit_addr_mb_high = limit_addr_mb >> high_addr_shift; + tmp = readl(base + PCIE_MEM_WIN0_LIMIT_HI(win)); + u32p_replace_bits(&tmp, limit_addr_mb_high, + PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK); + writel(tmp, base + PCIE_MEM_WIN0_LIMIT_HI(win)); +} + +static int brcm_pcie_probe(struct udevice 
*dev) +{ + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + struct brcm_pcie *pcie = dev_get_priv(dev); + void __iomem *base = pcie->base; + struct pci_region region; + bool ssc_good = false; + int num_out_wins = 0; + u64 rc_bar2_offset, rc_bar2_size; + unsigned int scb_size_val; + int i, ret; + u16 nlw, cls, lnksta; + u32 tmp; + + /* + * Reset the bridge, assert the fundamental reset. Note for some SoCs, + * e.g. BCM7278, the fundamental reset should not be asserted here. + * This will need to be changed when support for other SoCs is added. + */ + setbits_le32(base + PCIE_RGR1_SW_INIT_1, + RGR1_SW_INIT_1_INIT_MASK | RGR1_SW_INIT_1_PERST_MASK); + /* + * The delay is a safety precaution to preclude the reset signal + * from looking like a glitch. + */ + udelay(100); + + /* Take the bridge out of reset */ + clrbits_le32(base + PCIE_RGR1_SW_INIT_1, RGR1_SW_INIT_1_INIT_MASK); + + clrbits_le32(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG, + PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); + + /* Wait for SerDes to be stable */ + udelay(100); + + /* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */ + clrsetbits_le32(base + PCIE_MISC_MISC_CTRL, + MISC_CTRL_MAX_BURST_SIZE_MASK, + MISC_CTRL_SCB_ACCESS_EN_MASK | + MISC_CTRL_CFG_READ_UR_MODE_MASK | + MISC_CTRL_MAX_BURST_SIZE_128); + + pci_get_dma_regions(dev, ®ion, 0); + rc_bar2_offset = region.bus_start - region.phys_start; + rc_bar2_size = 1ULL << fls64(region.size - 1); + + tmp = lower_32_bits(rc_bar2_offset); + u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size), + RC_BAR2_CONFIG_LO_SIZE_MASK); + writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO); + writel(upper_32_bits(rc_bar2_offset), + base + PCIE_MISC_RC_BAR2_CONFIG_HI); + + scb_size_val = rc_bar2_size ? + ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */ + + tmp = readl(base + PCIE_MISC_MISC_CTRL); + u32p_replace_bits(&tmp, scb_size_val, + MISC_CTRL_SCB0_SIZE_MASK); + writel(tmp, base + PCIE_MISC_MISC_CTRL); + + /* Disable the PCIe->GISB memory window (RC_BAR1) */ + clrbits_le32(base + PCIE_MISC_RC_BAR1_CONFIG_LO, + RC_BAR1_CONFIG_LO_SIZE_MASK); + + /* Disable the PCIe->SCB memory window (RC_BAR3) */ + clrbits_le32(base + PCIE_MISC_RC_BAR3_CONFIG_LO, + RC_BAR3_CONFIG_LO_SIZE_MASK); + + /* Mask all interrupts since we are not handling any yet */ + writel(0xffffffff, base + PCIE_MSI_INTR2_MASK_SET); + + /* Clear any interrupts we find on boot */ + writel(0xffffffff, base + PCIE_MSI_INTR2_CLR); + + if (pcie->gen) + brcm_pcie_set_gen(pcie, pcie->gen); + + /* Unassert the fundamental reset */ + clrbits_le32(pcie->base + PCIE_RGR1_SW_INIT_1, + RGR1_SW_INIT_1_PERST_MASK); + + /* + * Wait for 100ms after PERST# deassertion; see PCIe CEM specification + * sections 2.2, PCIe r5.0, 6.6.1. + */ + mdelay(100); + + /* Give the RC/EP time to wake up, before trying to configure RC. + * Intermittently check status for link-up, up to a total of 100ms. 
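+	 * The link status is polled every 5 ms.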
+ */ + for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) + mdelay(5); + + if (!brcm_pcie_link_up(pcie)) { + printf("PCIe BRCM: link down\n"); + return -EINVAL; + } + + if (!brcm_pcie_rc_mode(pcie)) { + printf("PCIe misconfigured; is in EP mode\n"); + return -EINVAL; + } + + for (i = 0; i < hose->region_count; i++) { + struct pci_region *reg = &hose->regions[i]; + + if (reg->flags != PCI_REGION_MEM) + continue; + + if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) + return -EINVAL; + + brcm_pcie_set_outbound_win(pcie, num_out_wins, reg->phys_start, + reg->bus_start, reg->size); + + num_out_wins++; + } + + /* + * For config space accesses on the RC, show the right class for + * a PCIe-PCIe bridge (the default setting is to be EP mode). + */ + clrsetbits_le32(base + PCIE_RC_CFG_PRIV1_ID_VAL3, + CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK, 0x060400); + + if (pcie->ssc) { + ret = brcm_pcie_set_ssc(pcie->base); + if (!ret) + ssc_good = true; + else + printf("PCIe BRCM: failed attempt to enter SSC mode\n"); + } + + lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA); + cls = lnksta & PCI_EXP_LNKSTA_CLS; + nlw = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; + + printf("PCIe BRCM: link up, %s Gbps x%u %s\n", link_speed_to_str(cls), + nlw, ssc_good ? "(SSC)" : "(!SSC)"); + + /* PCIe->SCB endian mode for BAR */ + clrsetbits_le32(base + PCIE_RC_CFG_VENDOR_SPECIFIC_REG1, + VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK, + VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN); + + /* + * We used to enable the CLKREQ# input here, but a few PCIe cards don't + * attach anything to the CLKREQ# line, so we shouldn't assume that + * it's connected and working. The controller does allow detecting + * whether the port on the other side of our link is/was driving this + * signal, so we could check before we assume. But because this signal + * is for power management, which doesn't make sense in a bootloader, + * let's instead just unadvertise ASPM support. 
+ */ + clrbits_le32(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY, + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); + + return 0; +} + +static int brcm_pcie_remove(struct udevice *dev) +{ + struct brcm_pcie *pcie = dev_get_priv(dev); + void __iomem *base = pcie->base; + + /* Assert fundamental reset */ + setbits_le32(base + PCIE_RGR1_SW_INIT_1, RGR1_SW_INIT_1_PERST_MASK); + + /* Turn off SerDes */ + setbits_le32(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG, + PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); + + /* Shutdown bridge */ + setbits_le32(base + PCIE_RGR1_SW_INIT_1, RGR1_SW_INIT_1_INIT_MASK); + + return 0; +} + +static int brcm_pcie_of_to_plat(struct udevice *dev) +{ + struct brcm_pcie *pcie = dev_get_priv(dev); + ofnode dn = dev_ofnode(dev); + u32 max_link_speed; + int ret; + + /* Get the controller base address */ + pcie->base = dev_read_addr_ptr(dev); + if (!pcie->base) + return -EINVAL; + + pcie->ssc = ofnode_read_bool(dn, "brcm,enable-ssc"); + + ret = ofnode_read_u32(dn, "max-link-speed", &max_link_speed); + if (ret < 0 || max_link_speed > 4) + pcie->gen = 0; + else + pcie->gen = max_link_speed; + + return 0; +} + +static const struct dm_pci_ops brcm_pcie_ops = { + .read_config = brcm_pcie_read_config, + .write_config = brcm_pcie_write_config, +}; + +static const struct udevice_id brcm_pcie_ids[] = { + { .compatible = "brcm,bcm2711-pcie" }, + { } +}; + +U_BOOT_DRIVER(pcie_brcm_base) = { + .name = "pcie_brcm", + .id = UCLASS_PCI, + .ops = &brcm_pcie_ops, + .of_match = brcm_pcie_ids, + .probe = brcm_pcie_probe, + .remove = brcm_pcie_remove, + .of_to_plat = brcm_pcie_of_to_plat, + .priv_auto = sizeof(struct brcm_pcie), + .flags = DM_FLAG_OS_PREPARE, +}; diff --git a/drivers/pci/pcie_dw_common.c b/drivers/pci/pcie_dw_common.c new file mode 100644 index 00000000000..0673e516c6f --- /dev/null +++ b/drivers/pci/pcie_dw_common.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Copyright (c) 2021 Rockchip, Inc. 
+ * + * Copyright (C) 2018 Texas Instruments, Inc + */ + +#include <dm.h> +#include <log.h> +#include <pci.h> +#include <dm/device_compat.h> +#include <asm/io.h> +#include <linux/delay.h> +#include "pcie_dw_common.h" + +int pcie_dw_get_link_speed(struct pcie_dw *pci) +{ + return (readl(pci->dbi_base + PCIE_LINK_STATUS_REG) & + PCIE_LINK_STATUS_SPEED_MASK) >> PCIE_LINK_STATUS_SPEED_OFF; +} + +int pcie_dw_get_link_width(struct pcie_dw *pci) +{ + return (readl(pci->dbi_base + PCIE_LINK_STATUS_REG) & + PCIE_LINK_STATUS_WIDTH_MASK) >> PCIE_LINK_STATUS_WIDTH_OFF; +} + +static void dw_pcie_writel_ob_unroll(struct pcie_dw *pci, u32 index, u32 reg, + u32 val) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + void __iomem *base = pci->atu_base; + + writel(val, base + offset + reg); +} + +static u32 dw_pcie_readl_ob_unroll(struct pcie_dw *pci, u32 index, u32 reg) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + void __iomem *base = pci->atu_base; + + return readl(base + offset + reg); +} + +/** + * pcie_dw_prog_outbound_atu_unroll() - Configure ATU for outbound accesses + * + * @pcie: Pointer to the PCI controller state + * @index: ATU region index + * @type: ATU accsess type + * @cpu_addr: the physical address for the translation entry + * @pci_addr: the pcie bus address for the translation entry + * @size: the size of the translation entry + * + * Return: 0 is successful and -1 is failure + */ +int pcie_dw_prog_outbound_atu_unroll(struct pcie_dw *pci, int index, + int type, u64 cpu_addr, + u64 pci_addr, u32 size) +{ + u32 retries, val; + + dev_dbg(pci->dev, "ATU programmed with: index: %d, type: %d, cpu addr: %8llx, pci addr: %8llx, size: %8x\n", + index, type, cpu_addr, pci_addr, size); + + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, + lower_32_bits(cpu_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, + upper_32_bits(cpu_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT, + lower_32_bits(cpu_addr + size - 1)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT, + upper_32_bits(cpu_addr + size - 1)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, + lower_32_bits(pci_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(pci_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, + type); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_ob_unroll(pci, index, + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return 0; + + udelay(LINK_WAIT_IATU); + } + dev_err(pci->dev, "outbound iATU is not being enabled\n"); + + return -1; +} + +/** + * set_cfg_address() - Configure the PCIe controller config space access + * + * @pcie: Pointer to the PCI controller state + * @d: PCI device to access + * @where: Offset in the configuration space + * + * Configures the PCIe controller to access the configuration space of + * a specific PCIe device and returns the address to use for this + * access. 
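+ *
+ * Accesses to the root bus go straight to the DBI registers; all other
+ * buses are reached through outbound iATU region 1, using CFG0 TLPs for
+ * the bus directly below the root port and CFG1 TLPs for buses beyond it.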
+ * + * Return: Address that can be used to access the configation space + * of the requested device / offset + */ +static uintptr_t set_cfg_address(struct pcie_dw *pcie, + pci_dev_t d, uint where) +{ + int bus = PCI_BUS(d) - pcie->first_busno; + uintptr_t va_address; + u32 atu_type; + int ret; + + /* Use dbi_base for own configuration read and write */ + if (!bus) { + va_address = (uintptr_t)pcie->dbi_base; + goto out; + } + + if (bus == 1) + /* + * For local bus whose primary bus number is root bridge, + * change TLP Type field to 4. + */ + atu_type = PCIE_ATU_TYPE_CFG0; + else + /* Otherwise, change TLP Type field to 5. */ + atu_type = PCIE_ATU_TYPE_CFG1; + + /* + * Not accessing root port configuration space? + * Region #1 is used for Outbound CFG space access. + * Direction = Outbound + * Region Index = 1 + */ + d = PCI_MASK_BUS(d); + d = PCI_ADD_BUS(bus, d); + ret = pcie_dw_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1, + atu_type, (u64)pcie->cfg_base, + d << 8, pcie->cfg_size); + if (ret) + return (uintptr_t)ret; + + va_address = (uintptr_t)pcie->cfg_base; + +out: + va_address += where & ~0x3; + + return va_address; +} + +/** + * pcie_dw_addr_valid() - Check for valid bus address + * + * @d: The PCI device to access + * @first_busno: Bus number of the PCIe controller root complex + * + * Return 1 (true) if the PCI device can be accessed by this controller. + * + * Return: 1 on valid, 0 on invalid + */ +static int pcie_dw_addr_valid(pci_dev_t d, int first_busno) +{ + if ((PCI_BUS(d) == first_busno) && (PCI_DEV(d) > 0)) + return 0; + if ((PCI_BUS(d) == first_busno + 1) && (PCI_DEV(d) > 0)) + return 0; + + return 1; +} + +/** + * pcie_dw_read_config() - Read from configuration space + * + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @valuep: A pointer at which to store the read value + * @size: Indicates the size of access to perform + * + * Read a value of size @size from offset @offset within the configuration + * space of the device identified by the bus, device & function numbers in @bdf + * on the PCI bus @bus. + * + * Return: 0 on success + */ +int pcie_dw_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct pcie_dw *pcie = dev_get_priv(bus); + uintptr_t va_address; + ulong value; + + dev_dbg(pcie->dev, "PCIE CFG read: bdf=%2x:%2x:%2x ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + + if (!pcie_dw_addr_valid(bdf, pcie->first_busno)) { + debug("- out of range\n"); + *valuep = pci_get_ff(size); + return 0; + } + + va_address = set_cfg_address(pcie, bdf, offset); + + value = readl((void __iomem *)va_address); + + debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value); + *valuep = pci_conv_32_to_size(value, offset, size); + + return pcie_dw_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pcie->io.phys_start, + pcie->io.bus_start, pcie->io.size); +} + +/** + * pcie_dw_write_config() - Write to configuration space + * + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @value: The value to write + * @size: Indicates the size of access to perform + * + * Write the value @value of size @size from offset @offset within the + * configuration space of the device identified by the bus, device & function + * numbers in @bdf on the PCI bus @bus. 
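+ *
+ * Sub-word writes are performed as a read-modify-write of the
+ * containing 32-bit register. Once the access has completed, outbound
+ * iATU region 1 is reprogrammed back to I/O translation for the
+ * controller's I/O window.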
+ * + * Return: 0 on success + */ +int pcie_dw_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct pcie_dw *pcie = dev_get_priv(bus); + uintptr_t va_address; + ulong old; + + dev_dbg(pcie->dev, "PCIE CFG write: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + dev_dbg(pcie->dev, "(addr,val)=(0x%04x, 0x%08lx)\n", offset, value); + + if (!pcie_dw_addr_valid(bdf, pcie->first_busno)) { + debug("- out of range\n"); + return 0; + } + + va_address = set_cfg_address(pcie, bdf, offset); + + old = readl((void __iomem *)va_address); + value = pci_conv_size_to_32(old, value, offset, size); + writel(value, (void __iomem *)va_address); + + return pcie_dw_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pcie->io.phys_start, + pcie->io.bus_start, pcie->io.size); +} + +/** + * pcie_dw_setup_host() - Setup the PCIe controller for RC opertaion + * + * @pcie: Pointer to the PCI controller state + * + * Configure the host BARs of the PCIe controller root port so that + * PCI(e) devices may access the system memory. + */ +void pcie_dw_setup_host(struct pcie_dw *pci) +{ + struct udevice *ctlr = pci_get_controller(pci->dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + u32 ret; + + if (!pci->atu_base) + pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; + + /* setup RC BARs */ + writel(PCI_BASE_ADDRESS_MEM_TYPE_64, + pci->dbi_base + PCI_BASE_ADDRESS_0); + writel(0x0, pci->dbi_base + PCI_BASE_ADDRESS_1); + + /* setup interrupt pins */ + clrsetbits_le32(pci->dbi_base + PCI_INTERRUPT_LINE, + 0xff00, 0x100); + + /* setup bus numbers */ + clrsetbits_le32(pci->dbi_base + PCI_PRIMARY_BUS, + 0xffffff, 0x00ff0100); + + /* setup command register */ + clrsetbits_le32(pci->dbi_base + PCI_PRIMARY_BUS, + 0xffff, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SERR); + + /* Enable write permission for the DBI read-only register */ + dw_pcie_dbi_write_enable(pci, true); + /* program correct class for RC */ + writew(PCI_CLASS_BRIDGE_PCI, pci->dbi_base + PCI_CLASS_DEVICE); + /* Better disable write permission right after the update */ + dw_pcie_dbi_write_enable(pci, false); + + setbits_le32(pci->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, + PORT_LOGIC_SPEED_CHANGE); + + for (ret = 0; ret < hose->region_count; ret++) { + if (hose->regions[ret].flags == PCI_REGION_IO) { + pci->io.phys_start = hose->regions[ret].phys_start; /* IO base */ + pci->io.bus_start = hose->regions[ret].bus_start; /* IO_bus_addr */ + pci->io.size = hose->regions[ret].size; /* IO size */ + } else if (hose->regions[ret].flags == PCI_REGION_MEM) { + pci->mem.phys_start = hose->regions[ret].phys_start; /* MEM base */ + pci->mem.bus_start = hose->regions[ret].bus_start; /* MEM_bus_addr */ + pci->mem.size = hose->regions[ret].size; /* MEM size */ + } else if (hose->regions[ret].flags == PCI_REGION_PREFETCH) { + pci->prefetch.phys_start = hose->regions[ret].phys_start; /* PREFETCH base */ + pci->prefetch.bus_start = hose->regions[ret].bus_start; /* PREFETCH_bus_addr */ + pci->prefetch.size = hose->regions[ret].size; /* PREFETCH size */ + } else if (hose->regions[ret].flags == PCI_REGION_SYS_MEMORY) { + if (!pci->cfg_base) { + pci->cfg_base = (void *)(pci->io.phys_start - pci->io.size); + pci->cfg_size = pci->io.size; + } + } else { + dev_err(pci->dev, "invalid flags type!\n"); + } + } + + dev_dbg(pci->dev, "Config space: [0x%llx - 0x%llx, size 0x%llx]\n", + (u64)pci->cfg_base, (u64)pci->cfg_base + pci->cfg_size, 
+ (u64)pci->cfg_size); + + dev_dbg(pci->dev, "IO space: [0x%llx - 0x%llx, size 0x%llx]\n", + (u64)pci->io.phys_start, (u64)pci->io.phys_start + pci->io.size, + (u64)pci->io.size); + + dev_dbg(pci->dev, "IO bus: [0x%llx - 0x%llx, size 0x%llx]\n", + (u64)pci->io.bus_start, (u64)pci->io.bus_start + pci->io.size, + (u64)pci->io.size); + + dev_dbg(pci->dev, "MEM space: [0x%llx - 0x%llx, size 0x%llx]\n", + (u64)pci->mem.phys_start, + (u64)pci->mem.phys_start + pci->mem.size, + (u64)pci->mem.size); + + dev_dbg(pci->dev, "MEM bus: [0x%llx - 0x%llx, size 0x%llx]\n", + (u64)pci->mem.bus_start, + (u64)pci->mem.bus_start + pci->mem.size, + (u64)pci->mem.size); + + if (pci->prefetch.size) { + dev_dbg(pci->dev, "PREFETCH space: [0x%llx - 0x%llx, size 0x%llx]\n", + (u64)pci->prefetch.phys_start, + (u64)pci->prefetch.phys_start + pci->prefetch.size, + (u64)pci->prefetch.size); + + dev_dbg(pci->dev, "PREFETCH bus: [0x%llx - 0x%llx, size 0x%llx]\n", + (u64)pci->prefetch.bus_start, + (u64)pci->prefetch.bus_start + pci->prefetch.size, + (u64)pci->prefetch.size); + } +} diff --git a/drivers/pci/pcie_dw_common.h b/drivers/pci/pcie_dw_common.h new file mode 100644 index 00000000000..e0f7796f2a8 --- /dev/null +++ b/drivers/pci/pcie_dw_common.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2021 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Copyright (c) 2021 Rockchip, Inc. + * + * Copyright (C) 2018 Texas Instruments, Inc + */ + +#ifndef PCIE_DW_COMMON_H +#define PCIE_DW_COMMON_H + +#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20) + +/* PCI DBICS registers */ +#define PCIE_LINK_STATUS_REG 0x80 +#define PCIE_LINK_STATUS_SPEED_OFF 16 +#define PCIE_LINK_STATUS_SPEED_MASK (0xf << PCIE_LINK_STATUS_SPEED_OFF) +#define PCIE_LINK_STATUS_WIDTH_OFF 20 +#define PCIE_LINK_STATUS_WIDTH_MASK (0xf << PCIE_LINK_STATUS_WIDTH_OFF) + +/* + * iATU Unroll-specific register definitions + * From 4.80 core version the address translation will be made by unroll. 
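+ * Each region occupies a 0x200-byte window of registers in the unrolled
+ * iATU space (see PCIE_GET_ATU_OUTB_UNR_REG_OFFSET() below).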
+ * The registers are offset from atu_base + */ +#define PCIE_ATU_UNR_REGION_CTRL1 0x00 +#define PCIE_ATU_UNR_REGION_CTRL2 0x04 +#define PCIE_ATU_UNR_LOWER_BASE 0x08 +#define PCIE_ATU_UNR_UPPER_BASE 0x0c +#define PCIE_ATU_UNR_LIMIT 0x10 +#define PCIE_ATU_UNR_LOWER_TARGET 0x14 +#define PCIE_ATU_UNR_UPPER_TARGET 0x18 +#define PCIE_ATU_UNR_UPPER_LIMIT 0x20 + +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_TYPE_MEM (0x0 << 0) +#define PCIE_ATU_TYPE_IO (0x2 << 0) +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_ENABLE (0x1 << 31) +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) + +/* Register address builder */ +#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((region) << 9) + +/* Parameters for the waiting for iATU enabled routine */ +#define LINK_WAIT_MAX_IATU_RETRIES 5 +#define LINK_WAIT_IATU_US 10000 + +/* PCI DBICS registers */ +#define PCIE_LINK_STATUS_REG 0x80 +#define PCIE_LINK_STATUS_SPEED_OFF 16 +#define PCIE_LINK_STATUS_SPEED_MASK (0xf << PCIE_LINK_STATUS_SPEED_OFF) +#define PCIE_LINK_STATUS_WIDTH_OFF 20 +#define PCIE_LINK_STATUS_WIDTH_MASK (0xf << PCIE_LINK_STATUS_WIDTH_OFF) + +#define PCIE_LINK_CAPABILITY 0x7c +#define PCIE_LINK_CTL_2 0xa0 +#define TARGET_LINK_SPEED_MASK 0xf +#define LINK_SPEED_GEN_1 0x1 +#define LINK_SPEED_GEN_2 0x2 +#define LINK_SPEED_GEN_3 0x3 + +/* Synopsys-specific PCIe configuration registers */ +#define PCIE_PORT_LINK_CONTROL 0x710 +#define PORT_LINK_DLL_LINK_EN BIT(5) +#define PORT_LINK_FAST_LINK_MODE BIT(7) +#define PORT_LINK_MODE_MASK GENMASK(21, 16) +#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n) +#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1) +#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3) +#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7) +#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf) + +#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C +#define PORT_LOGIC_N_FTS_MASK GENMASK(7, 0) +#define PORT_LOGIC_SPEED_CHANGE BIT(17) +#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8) +#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n) +#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1) +#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2) +#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4) +#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8) + +#define PCIE_MISC_CONTROL_1_OFF 0x8bc +#define PCIE_DBI_RO_WR_EN BIT(0) + +/* Parameters for the waiting for iATU enabled routine */ +#define LINK_WAIT_MAX_IATU_RETRIES 5 +#define LINK_WAIT_IATU 10000 + +/** + * struct pcie_dw - DW PCIe controller state + * + * @dbi_base: The base address of dbi register space + * @cfg_base: The base address of configuration space + * @atu_base: The base address of ATU space + * @cfg_size: The size of the configuration space which is needed + * as it gets written into the PCIE_ATU_LIMIT register + * @first_busno: This driver supports multiple PCIe controllers. + * first_busno stores the bus number of the PCIe root-port + * number which may vary depending on the PCIe setup + * (PEX switches etc). 
+ * @io: The IO space for EP's BAR + * @mem: The memory space for EP's BAR + * @prefetch: The prefetch space for EP's BAR + */ +struct pcie_dw { + struct udevice *dev; + void __iomem *dbi_base; + void __iomem *cfg_base; + void __iomem *atu_base; + fdt_size_t cfg_size; + + int first_busno; + + /* IO, MEM & PREFETCH PCI regions */ + struct pci_region io; + struct pci_region mem; + struct pci_region prefetch; +}; + +int pcie_dw_get_link_speed(struct pcie_dw *pci); + +int pcie_dw_get_link_width(struct pcie_dw *pci); + +int pcie_dw_prog_outbound_atu_unroll(struct pcie_dw *pci, int index, int type, u64 cpu_addr, + u64 pci_addr, u32 size); + +int pcie_dw_read_config(const struct udevice *bus, pci_dev_t bdf, uint offset, ulong *valuep, + enum pci_size_t size); + +int pcie_dw_write_config(struct udevice *bus, pci_dev_t bdf, uint offset, ulong value, + enum pci_size_t size); + +static inline void dw_pcie_dbi_write_enable(struct pcie_dw *pci, bool en) +{ + u32 val; + + val = readl(pci->dbi_base + PCIE_MISC_CONTROL_1_OFF); + if (en) + val |= PCIE_DBI_RO_WR_EN; + else + val &= ~PCIE_DBI_RO_WR_EN; + writel(val, pci->dbi_base + PCIE_MISC_CONTROL_1_OFF); +} + +void pcie_dw_setup_host(struct pcie_dw *pci); + +#endif diff --git a/drivers/pci/pcie_dw_imx.c b/drivers/pci/pcie_dw_imx.c new file mode 100644 index 00000000000..fdb463710ba --- /dev/null +++ b/drivers/pci/pcie_dw_imx.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2024 Linaro Ltd. + * + * Author: Sumit Garg <sumit.garg@linaro.org> + */ + +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <clk.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <generic-phy.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/iopoll.h> +#include <log.h> +#include <pci.h> +#include <power/regulator.h> +#include <regmap.h> +#include <reset.h> +#include <syscon.h> +#include <time.h> + +#include "pcie_dw_common.h" + +#define PCIE_LINK_CAPABILITY 0x7c +#define TARGET_LINK_SPEED_MASK 0xf +#define LINK_SPEED_GEN_1 0x1 +#define LINK_SPEED_GEN_2 0x2 +#define LINK_SPEED_GEN_3 0x3 + +#define PCIE_MISC_CONTROL_1_OFF 0x8bc +#define PCIE_DBI_RO_WR_EN BIT(0) + +#define PCIE_PORT_DEBUG0 0x728 +#define PCIE_PORT_DEBUG1 0x72c +#define PCIE_PORT_DEBUG1_LINK_UP BIT(4) +#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29) + +#define PCIE_LINK_UP_TIMEOUT_MS 100 + +#define IOMUXC_GPR14_OFFSET 0x38 +#define IMX8M_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) +#define IMX8M_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) + +struct pcie_dw_imx { + /* Must be first member of the struct */ + struct pcie_dw dw; + struct regmap *iomuxc_gpr; + struct clk_bulk clks; + struct gpio_desc reset_gpio; + struct reset_ctl apps_reset; + struct phy phy; + struct udevice *vpcie; +}; + +struct pcie_chip_info { + const char *gpr; +}; + +static const struct pcie_chip_info imx8mm_chip_info = { + .gpr = "fsl,imx8mm-iomuxc-gpr", +}; + +static const struct pcie_chip_info imx8mp_chip_info = { + .gpr = "fsl,imx8mp-iomuxc-gpr", +}; + +static void pcie_dw_configure(struct pcie_dw_imx *priv, u32 cap_speed) +{ + dw_pcie_dbi_write_enable(&priv->dw, true); + + clrsetbits_le32(priv->dw.dbi_base + PCIE_LINK_CAPABILITY, + TARGET_LINK_SPEED_MASK, cap_speed); + + dw_pcie_dbi_write_enable(&priv->dw, false); +} + +static void imx_pcie_ltssm_enable(struct pcie_dw_imx *priv) +{ + reset_deassert(&priv->apps_reset); +} + +static void imx_pcie_ltssm_disable(struct pcie_dw_imx *priv) +{ + reset_assert(&priv->apps_reset); +} + +static bool is_link_up(u32 val) +{ + 
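+	/* Link is up once DEBUG1 reports link-up and LTSSM training has finished */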
return ((val & PCIE_PORT_DEBUG1_LINK_UP) && + (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); +} + +static int wait_link_up(struct pcie_dw_imx *priv) +{ + u32 val; + + return readl_poll_sleep_timeout(priv->dw.dbi_base + PCIE_PORT_DEBUG1, + val, is_link_up(val), 10000, 100000); +} + +static int pcie_link_up(struct pcie_dw_imx *priv, u32 cap_speed) +{ + int ret; + + /* DW pre link configurations */ + pcie_dw_configure(priv, cap_speed); + + /* Initiate link training */ + imx_pcie_ltssm_enable(priv); + + /* Check that link was established */ + ret = wait_link_up(priv); + if (ret) + imx_pcie_ltssm_disable(priv); + + return ret; +} + +static int imx_pcie_assert_core_reset(struct pcie_dw_imx *priv) +{ + if (dm_gpio_is_valid(&priv->reset_gpio)) { + dm_gpio_set_value(&priv->reset_gpio, 1); + mdelay(20); + } + + return reset_assert(&priv->apps_reset); +} + +static int imx_pcie_clk_enable(struct pcie_dw_imx *priv) +{ + int ret; + + ret = clk_enable_bulk(&priv->clks); + if (ret) + return ret; + + /* + * Set the over ride low and enabled make sure that + * REF_CLK is turned on. + */ + regmap_update_bits(priv->iomuxc_gpr, IOMUXC_GPR14_OFFSET, + IMX8M_GPR_PCIE_CLK_REQ_OVERRIDE, 0); + regmap_update_bits(priv->iomuxc_gpr, IOMUXC_GPR14_OFFSET, + IMX8M_GPR_PCIE_CLK_REQ_OVERRIDE_EN, + IMX8M_GPR_PCIE_CLK_REQ_OVERRIDE_EN); + + /* allow the clocks to stabilize */ + udelay(500); + + return 0; +} + +static void imx_pcie_deassert_core_reset(struct pcie_dw_imx *priv) +{ + if (!dm_gpio_is_valid(&priv->reset_gpio)) + return; + + mdelay(100); + dm_gpio_set_value(&priv->reset_gpio, 0); + /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ + mdelay(100); +} + +static int pcie_dw_imx_probe(struct udevice *dev) +{ + struct pcie_dw_imx *priv = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + int ret; + + if (priv->vpcie) { + ret = regulator_set_enable(priv->vpcie, true); + if (ret) { + dev_err(dev, "failed to enable vpcie regulator\n"); + return ret; + } + } + + ret = imx_pcie_assert_core_reset(priv); + if (ret) { + dev_err(dev, "failed to assert core reset\n"); + return ret; + } + + ret = imx_pcie_clk_enable(priv); + if (ret) { + dev_err(dev, "failed to enable clocks\n"); + goto err_clk; + } + + ret = generic_phy_init(&priv->phy); + if (ret) { + dev_err(dev, "failed to initialize PHY\n"); + goto err_phy_init; + } + + ret = generic_phy_power_on(&priv->phy); + if (ret) { + dev_err(dev, "failed to power on PHY\n"); + goto err_phy_power; + } + + imx_pcie_deassert_core_reset(priv); + + priv->dw.first_busno = dev_seq(dev); + priv->dw.dev = dev; + pcie_dw_setup_host(&priv->dw); + + if (pcie_link_up(priv, LINK_SPEED_GEN_1)) { + printf("PCIE-%d: Link down\n", dev_seq(dev)); + ret = -ENODEV; + goto err_link; + } + + printf("PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n", dev_seq(dev), + pcie_dw_get_link_speed(&priv->dw), + pcie_dw_get_link_width(&priv->dw), + hose->first_busno); + + pcie_dw_prog_outbound_atu_unroll(&priv->dw, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_MEM, + priv->dw.mem.phys_start, + priv->dw.mem.bus_start, priv->dw.mem.size); + + return 0; + +err_link: + generic_shutdown_phy(&priv->phy); +err_phy_power: + generic_phy_exit(&priv->phy); +err_phy_init: + clk_disable_bulk(&priv->clks); +err_clk: + imx_pcie_deassert_core_reset(priv); + + return ret; +} + +static int pcie_dw_imx_remove(struct udevice *dev) +{ + struct pcie_dw_imx *priv = dev_get_priv(dev); + + generic_shutdown_phy(&priv->phy); + dm_gpio_free(dev, &priv->reset_gpio); + 
reset_free(&priv->apps_reset); + clk_release_bulk(&priv->clks); + + return 0; +} + +static int pcie_dw_imx_of_to_plat(struct udevice *dev) +{ + struct pcie_chip_info *info = (void *)dev_get_driver_data(dev); + struct pcie_dw_imx *priv = dev_get_priv(dev); + ofnode gpr; + int ret; + + /* Get the controller base address */ + priv->dw.dbi_base = (void *)dev_read_addr_name(dev, "dbi"); + if ((fdt_addr_t)priv->dw.dbi_base == FDT_ADDR_T_NONE) { + dev_err(dev, "failed to get dbi_base address\n"); + return -EINVAL; + } + + /* Get the config space base address and size */ + priv->dw.cfg_base = (void *)dev_read_addr_size_name(dev, "config", + &priv->dw.cfg_size); + if ((fdt_addr_t)priv->dw.cfg_base == FDT_ADDR_T_NONE) { + dev_err(dev, "failed to get cfg_base address\n"); + return -EINVAL; + } + + ret = clk_get_bulk(dev, &priv->clks); + if (ret) { + dev_err(dev, "failed to get PCIe clks\n"); + return ret; + } + + ret = reset_get_by_name(dev, "apps", &priv->apps_reset); + if (ret) { + dev_err(dev, + "Failed to get PCIe apps reset control\n"); + goto err_reset; + } + + ret = gpio_request_by_name(dev, "reset-gpio", 0, &priv->reset_gpio, + GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); + if (ret) { + dev_err(dev, "unable to get reset-gpio\n"); + goto err_gpio; + } + + ret = generic_phy_get_by_name(dev, "pcie-phy", &priv->phy); + if (ret) { + dev_err(dev, "failed to get pcie phy\n"); + goto err_phy; + } + + gpr = ofnode_by_compatible(ofnode_null(), info->gpr); + if (ofnode_equal(gpr, ofnode_null())) { + dev_err(dev, "unable to find GPR node\n"); + ret = -ENODEV; + goto err_phy; + } + + priv->iomuxc_gpr = syscon_node_to_regmap(gpr); + if (IS_ERR(priv->iomuxc_gpr)) { + dev_err(dev, "unable to find iomuxc registers\n"); + ret = PTR_ERR(priv->iomuxc_gpr); + goto err_phy; + } + + /* vpcie-supply regulator is optional */ + device_get_supply_regulator(dev, "vpcie-supply", &priv->vpcie); + + return 0; + +err_phy: + dm_gpio_free(dev, &priv->reset_gpio); +err_gpio: + reset_free(&priv->apps_reset); +err_reset: + clk_release_bulk(&priv->clks); + + return ret; +} + +static const struct dm_pci_ops pcie_dw_imx_ops = { + .read_config = pcie_dw_read_config, + .write_config = pcie_dw_write_config, +}; + +static const struct udevice_id pcie_dw_imx_ids[] = { + { .compatible = "fsl,imx8mm-pcie", .data = (ulong)&imx8mm_chip_info, }, + { .compatible = "fsl,imx8mp-pcie", .data = (ulong)&imx8mp_chip_info, }, + { } +}; + +U_BOOT_DRIVER(pcie_dw_imx) = { + .name = "pcie_dw_imx", + .id = UCLASS_PCI, + .of_match = pcie_dw_imx_ids, + .ops = &pcie_dw_imx_ops, + .of_to_plat = pcie_dw_imx_of_to_plat, + .probe = pcie_dw_imx_probe, + .remove = pcie_dw_imx_remove, + .priv_auto = sizeof(struct pcie_dw_imx), +}; diff --git a/drivers/pci/pcie_dw_meson.c b/drivers/pci/pcie_dw_meson.c new file mode 100644 index 00000000000..bb78e7874b1 --- /dev/null +++ b/drivers/pci/pcie_dw_meson.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Amlogic DesignWare based PCIe host controller driver + * + * Copyright (c) 2021 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Based on pcie_dw_rockchip.c + * Copyright (c) 2021 Rockchip, Inc. 
+ */ + +#include <clk.h> +#include <dm.h> +#include <generic-phy.h> +#include <pci.h> +#include <power-domain.h> +#include <reset.h> +#include <syscon.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <dm/device_compat.h> +#include <linux/iopoll.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/bitfield.h> + +#include "pcie_dw_common.h" + +DECLARE_GLOBAL_DATA_PTR; + +/** + * struct meson_pcie - Amlogic Meson DW PCIe controller state + * + * @pci: The common PCIe DW structure + * @meson_cfg_base: The base address of vendor regs + * @phy + * @clk_port + * @clk_general + * @clk_pclk + * @rsts + * @rst_gpio: The #PERST signal for slot + */ +struct meson_pcie { + /* Must be first member of the struct */ + struct pcie_dw dw; + void *meson_cfg_base; + struct phy phy; + struct clk clk_port; + struct clk clk_general; + struct clk clk_pclk; + struct reset_ctl_bulk rsts; + struct gpio_desc rst_gpio; +}; + +#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ + +#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5) +#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12) + +/* PCIe specific config registers */ +#define PCIE_CFG0 0x0 +#define APP_LTSSM_ENABLE BIT(7) + +#define PCIE_CFG_STATUS12 0x30 +#define IS_SMLH_LINK_UP(x) ((x) & (1 << 6)) +#define IS_RDLH_LINK_UP(x) ((x) & (1 << 16)) +#define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11) + +#define PCIE_CFG_STATUS17 0x44 +#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1) + +#define WAIT_LINKUP_TIMEOUT 4000 +#define PORT_CLK_RATE 100000000UL +#define MAX_PAYLOAD_SIZE 256 +#define MAX_READ_REQ_SIZE 256 +#define PCIE_RESET_DELAY 500 +#define PCIE_SHARED_RESET 1 +#define PCIE_NORMAL_RESET 0 + +enum pcie_data_rate { + PCIE_GEN1, + PCIE_GEN2, + PCIE_GEN3, + PCIE_GEN4 +}; + +/* Parameters for the waiting for #perst signal */ +#define PERST_WAIT_US 1000000 + +static inline u32 meson_cfg_readl(struct meson_pcie *priv, u32 reg) +{ + return readl(priv->meson_cfg_base + reg); +} + +static inline void meson_cfg_writel(struct meson_pcie *priv, u32 val, u32 reg) +{ + writel(val, priv->meson_cfg_base + reg); +} + +/** + * meson_pcie_configure() - Configure link + * + * @meson_pcie: Pointer to the PCI controller state + * + * Configure the link mode and width + */ +static void meson_pcie_configure(struct meson_pcie *priv) +{ + u32 val; + + dw_pcie_dbi_write_enable(&priv->dw, true); + + val = readl(priv->dw.dbi_base + PCIE_PORT_LINK_CONTROL); + val &= ~PORT_LINK_FAST_LINK_MODE; + val |= PORT_LINK_DLL_LINK_EN; + val &= ~PORT_LINK_MODE_MASK; + val |= PORT_LINK_MODE_1_LANES; + writel(val, priv->dw.dbi_base + PCIE_PORT_LINK_CONTROL); + + val = readl(priv->dw.dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + val &= ~PORT_LOGIC_LINK_WIDTH_MASK; + val |= PORT_LOGIC_LINK_WIDTH_1_LANES; + writel(val, priv->dw.dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + + dw_pcie_dbi_write_enable(&priv->dw, false); +} + +static inline void meson_pcie_enable_ltssm(struct meson_pcie *priv) +{ + u32 val; + + val = meson_cfg_readl(priv, PCIE_CFG0); + val |= APP_LTSSM_ENABLE; + meson_cfg_writel(priv, val, PCIE_CFG0); +} + +static int meson_pcie_wait_link_up(struct meson_pcie *priv) +{ + u32 speed_okay = 0; + u32 cnt = 0; + u32 state12, state17, smlh_up, ltssm_up, rdlh_up; + + do { + state12 = meson_cfg_readl(priv, PCIE_CFG_STATUS12); + state17 = meson_cfg_readl(priv, PCIE_CFG_STATUS17); + smlh_up = IS_SMLH_LINK_UP(state12); + rdlh_up = IS_RDLH_LINK_UP(state12); + ltssm_up = IS_LTSSM_UP(state12); + + if (PM_CURRENT_STATE(state17) < PCIE_GEN3) 
+ speed_okay = 1; + + if (smlh_up) + debug("%s: smlh_link_up is on\n", __func__); + if (rdlh_up) + debug("%s: rdlh_link_up is on\n", __func__); + if (ltssm_up) + debug("%s: ltssm_up is on\n", __func__); + if (speed_okay) + debug("%s: speed_okay\n", __func__); + + if (smlh_up && rdlh_up && ltssm_up && speed_okay) + return 0; + + cnt++; + + udelay(10); + } while (cnt < WAIT_LINKUP_TIMEOUT); + + printf("%s: error: wait linkup timeout\n", __func__); + return -EIO; +} + +/** + * meson_pcie_link_up() - Wait for the link to come up + * + * @meson_pcie: Pointer to the PCI controller state + * @cap_speed: Desired link speed + * + * Return: 1 (true) for active line and negative (false) for no link (timeout) + */ +static int meson_pcie_link_up(struct meson_pcie *priv, u32 cap_speed) +{ + /* DW link configurations */ + meson_pcie_configure(priv); + + /* Reset the device */ + if (dm_gpio_is_valid(&priv->rst_gpio)) { + dm_gpio_set_value(&priv->rst_gpio, 1); + /* + * Minimal is 100ms from spec but we see + * some wired devices need much more, such as 600ms. + * Add a enough delay to cover all cases. + */ + udelay(PERST_WAIT_US); + dm_gpio_set_value(&priv->rst_gpio, 0); + } + + /* Enable LTSSM */ + meson_pcie_enable_ltssm(priv); + + return meson_pcie_wait_link_up(priv); +} + +static int meson_size_to_payload(int size) +{ + /* + * dwc supports 2^(val+7) payload size, which val is 0~5 default to 1. + * So if input size is not 2^order alignment or less than 2^7 or bigger + * than 2^12, just set to default size 2^(1+7). + */ + if (!is_power_of_2(size) || size < 128 || size > 4096) { + debug("%s: payload size %d, set to default 256\n", __func__, size); + return 1; + } + + return fls(size) - 8; +} + +static void meson_set_max_payload(struct meson_pcie *priv, int size) +{ + u32 val; + u16 offset = dm_pci_find_capability(priv->dw.dev, PCI_CAP_ID_EXP); + int max_payload_size = meson_size_to_payload(size); + + dw_pcie_dbi_write_enable(&priv->dw, true); + + val = readl(priv->dw.dbi_base + offset + PCI_EXP_DEVCTL); + val &= ~PCI_EXP_DEVCTL_PAYLOAD; + writel(val, priv->dw.dbi_base + offset + PCI_EXP_DEVCTL); + + val = readl(priv->dw.dbi_base + offset + PCI_EXP_DEVCTL); + val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size); + writel(val, priv->dw.dbi_base + PCI_EXP_DEVCTL); + + dw_pcie_dbi_write_enable(&priv->dw, false); +} + +static void meson_set_max_rd_req_size(struct meson_pcie *priv, int size) +{ + u32 val; + u16 offset = dm_pci_find_capability(priv->dw.dev, PCI_CAP_ID_EXP); + int max_rd_req_size = meson_size_to_payload(size); + + dw_pcie_dbi_write_enable(&priv->dw, true); + + val = readl(priv->dw.dbi_base + offset + PCI_EXP_DEVCTL); + val &= ~PCI_EXP_DEVCTL_PAYLOAD; + writel(val, priv->dw.dbi_base + offset + PCI_EXP_DEVCTL); + + val = readl(priv->dw.dbi_base + offset + PCI_EXP_DEVCTL); + val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size); + writel(val, priv->dw.dbi_base + PCI_EXP_DEVCTL); + + dw_pcie_dbi_write_enable(&priv->dw, false); +} + +static int meson_pcie_init_port(struct udevice *dev) +{ + int ret; + struct meson_pcie *priv = dev_get_priv(dev); + + ret = generic_phy_init(&priv->phy); + if (ret) { + dev_err(dev, "failed to init phy (ret=%d)\n", ret); + return ret; + } + + ret = generic_phy_power_on(&priv->phy); + if (ret) { + dev_err(dev, "failed to power on phy (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = generic_phy_reset(&priv->phy); + if (ret) { + dev_err(dev, "failed to reset phy (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = reset_assert_bulk(&priv->rsts); + if (ret) { + dev_err(dev, 
"failed to assert resets (ret=%d)\n", ret); + goto err_power_off_phy; + } + + udelay(PCIE_RESET_DELAY); + + ret = reset_deassert_bulk(&priv->rsts); + if (ret) { + dev_err(dev, "failed to deassert resets (ret=%d)\n", ret); + goto err_power_off_phy; + } + + udelay(PCIE_RESET_DELAY); + + ret = clk_set_rate(&priv->clk_port, PORT_CLK_RATE); + if (ret) { + dev_err(dev, "failed to set port clk rate (ret=%d)\n", ret); + goto err_deassert_bulk; + } + + ret = clk_enable(&priv->clk_general); + if (ret) { + dev_err(dev, "failed to enable clk general (ret=%d)\n", ret); + goto err_deassert_bulk; + } + + ret = clk_enable(&priv->clk_pclk); + if (ret) { + dev_err(dev, "failed to enable pclk (ret=%d)\n", ret); + goto err_deassert_bulk; + } + + meson_set_max_payload(priv, MAX_PAYLOAD_SIZE); + meson_set_max_rd_req_size(priv, MAX_READ_REQ_SIZE); + + pcie_dw_setup_host(&priv->dw); + + meson_pcie_link_up(priv, LINK_SPEED_GEN_2); + + return 0; +err_deassert_bulk: + reset_assert_bulk(&priv->rsts); +err_power_off_phy: + generic_phy_power_off(&priv->phy); +err_exit_phy: + generic_phy_exit(&priv->phy); + + return ret; +} + +static int meson_pcie_parse_dt(struct udevice *dev) +{ + struct meson_pcie *priv = dev_get_priv(dev); + int ret; + + priv->dw.dbi_base = dev_read_addr_index_ptr(dev, 0); + if (!priv->dw.dbi_base) + return -EINVAL; + + dev_dbg(dev, "ELBI address is 0x%p\n", priv->dw.dbi_base); + + priv->meson_cfg_base = dev_read_addr_index_ptr(dev, 1); + if (!priv->meson_cfg_base) + return -EINVAL; + + dev_dbg(dev, "CFG address is 0x%p\n", priv->meson_cfg_base); + + ret = gpio_request_by_name(dev, "reset-gpios", 0, + &priv->rst_gpio, GPIOD_IS_OUT); + if (ret) { + dev_err(dev, "failed to find reset-gpios property\n"); + return ret; + } + + ret = reset_get_bulk(dev, &priv->rsts); + if (ret) { + dev_err(dev, "Can't get reset: %d\n", ret); + return ret; + } + + ret = clk_get_by_name(dev, "port", &priv->clk_port); + if (ret) { + dev_err(dev, "Can't get port clock: %d\n", ret); + return ret; + } + + ret = clk_get_by_name(dev, "general", &priv->clk_general); + if (ret) { + dev_err(dev, "Can't get port clock: %d\n", ret); + return ret; + } + + ret = clk_get_by_name(dev, "pclk", &priv->clk_pclk); + if (ret) { + dev_err(dev, "Can't get port clock: %d\n", ret); + return ret; + } + + ret = generic_phy_get_by_index(dev, 0, &priv->phy); + if (ret) { + dev_err(dev, "failed to get pcie phy (ret=%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * meson_pcie_probe() - Probe the PCIe bus for active link + * + * @dev: A pointer to the device being operated on + * + * Probe for an active link on the PCIe bus and configure the controller + * to enable this port. 
+ * + * Return: 0 on success, else -ENODEV + */ +static int meson_pcie_probe(struct udevice *dev) +{ + struct meson_pcie *priv = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + int ret = 0; + + priv->dw.first_busno = dev_seq(dev); + priv->dw.dev = dev; + + ret = meson_pcie_parse_dt(dev); + if (ret) + return ret; + + ret = meson_pcie_init_port(dev); + if (ret) { + dm_gpio_free(dev, &priv->rst_gpio); + return ret; + } + + printf("PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n", + dev_seq(dev), pcie_dw_get_link_speed(&priv->dw), + pcie_dw_get_link_width(&priv->dw), + hose->first_busno); + + return pcie_dw_prog_outbound_atu_unroll(&priv->dw, + PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_MEM, + priv->dw.mem.phys_start, + priv->dw.mem.bus_start, + priv->dw.mem.size); +} + +static const struct dm_pci_ops meson_pcie_ops = { + .read_config = pcie_dw_read_config, + .write_config = pcie_dw_write_config, +}; + +static const struct udevice_id meson_pcie_ids[] = { + { .compatible = "amlogic,axg-pcie" }, + { .compatible = "amlogic,g12a-pcie" }, + { } +}; + +U_BOOT_DRIVER(meson_dw_pcie) = { + .name = "pcie_dw_meson", + .id = UCLASS_PCI, + .of_match = meson_pcie_ids, + .ops = &meson_pcie_ops, + .probe = meson_pcie_probe, + .priv_auto = sizeof(struct meson_pcie), +}; diff --git a/drivers/pci/pcie_dw_mvebu.c b/drivers/pci/pcie_dw_mvebu.c new file mode 100644 index 00000000000..43b919175c9 --- /dev/null +++ b/drivers/pci/pcie_dw_mvebu.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2015 Marvell International Ltd. + * + * Copyright (C) 2016 Stefan Roese <sr@denx.de> + * + * Based on: + * - drivers/pci/pcie_imx.c + * - drivers/pci/pci_mvebu.c + * - drivers/pci/pcie_xilinx.c + */ + +#include <config.h> +#include <dm.h> +#include <log.h> +#include <pci.h> +#include <time.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <linux/delay.h> + +DECLARE_GLOBAL_DATA_PTR; + +/* PCI Config space registers */ +#define PCIE_CONFIG_BAR0 0x10 +#define PCIE_LINK_STATUS_REG 0x80 +#define PCIE_LINK_STATUS_SPEED_OFF 16 +#define PCIE_LINK_STATUS_SPEED_MASK (0xf << PCIE_LINK_STATUS_SPEED_OFF) +#define PCIE_LINK_STATUS_WIDTH_OFF 20 +#define PCIE_LINK_STATUS_WIDTH_MASK (0xf << PCIE_LINK_STATUS_WIDTH_OFF) + +/* Resizable bar capability registers */ +#define RESIZABLE_BAR_CAP 0x250 +#define RESIZABLE_BAR_CTL0 0x254 +#define RESIZABLE_BAR_CTL1 0x258 + +/* iATU registers */ +#define PCIE_ATU_VIEWPORT 0x900 +#define PCIE_ATU_REGION_INBOUND (0x1 << 31) +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_TYPE_MEM (0x0 << 0) +#define PCIE_ATU_TYPE_IO (0x2 << 0) +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_ENABLE (0x1 << 31) +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_LOWER_BASE 0x90C +#define PCIE_ATU_UPPER_BASE 0x910 +#define PCIE_ATU_LIMIT 0x914 +#define PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) +#define PCIE_ATU_UPPER_TARGET 0x91C + +#define PCIE_LINK_CAPABILITY 0x7C +#define PCIE_LINK_CTL_2 0xA0 +#define TARGET_LINK_SPEED_MASK 0xF +#define LINK_SPEED_GEN_1 0x1 +#define LINK_SPEED_GEN_2 0x2 +#define LINK_SPEED_GEN_3 0x3 + +#define PCIE_GEN3_RELATED 0x890 
+#define GEN3_EQU_DISABLE (1 << 16) +#define GEN3_ZRXDC_NON_COMP (1 << 0) + +#define PCIE_GEN3_EQU_CTRL 0x8A8 +#define GEN3_EQU_EVAL_2MS_DISABLE (1 << 5) + +#define PCIE_ROOT_COMPLEX_MODE_MASK (0xF << 4) + +#define PCIE_LINK_UP_TIMEOUT_MS 100 + +#define PCIE_GLOBAL_CONTROL 0x8000 +#define PCIE_APP_LTSSM_EN (1 << 2) +#define PCIE_DEVICE_TYPE_OFFSET (4) +#define PCIE_DEVICE_TYPE_MASK (0xF) +#define PCIE_DEVICE_TYPE_EP (0x0) /* Endpoint */ +#define PCIE_DEVICE_TYPE_LEP (0x1) /* Legacy endpoint */ +#define PCIE_DEVICE_TYPE_RC (0x4) /* Root complex */ + +#define PCIE_GLOBAL_STATUS 0x8008 +#define PCIE_GLB_STS_RDLH_LINK_UP (1 << 1) +#define PCIE_GLB_STS_PHY_LINK_UP (1 << 9) + +#define PCIE_ARCACHE_TRC 0x8050 +#define PCIE_AWCACHE_TRC 0x8054 +#define ARCACHE_SHAREABLE_CACHEABLE 0x3511 +#define AWCACHE_SHAREABLE_CACHEABLE 0x5311 + +#define LINK_SPEED_GEN_1 0x1 +#define LINK_SPEED_GEN_2 0x2 +#define LINK_SPEED_GEN_3 0x3 + +/** + * struct pcie_dw_mvebu - MVEBU DW PCIe controller state + * + * @ctrl_base: The base address of the register space + * @cfg_base: The base address of the configuration space + * @cfg_size: The size of the configuration space which is needed + * as it gets written into the PCIE_ATU_LIMIT register + * @first_busno: This driver supports multiple PCIe controllers. + * first_busno stores the bus number of the PCIe root-port + * number which may vary depending on the PCIe setup + * (PEX switches etc). + */ +struct pcie_dw_mvebu { + void *ctrl_base; + void *cfg_base; + fdt_size_t cfg_size; + int first_busno; + + /* IO and MEM PCI regions */ + int region_count; + struct pci_region io; + struct pci_region mem; +}; + +static int pcie_dw_get_link_speed(const void *regs_base) +{ + return (readl(regs_base + PCIE_LINK_STATUS_REG) & + PCIE_LINK_STATUS_SPEED_MASK) >> PCIE_LINK_STATUS_SPEED_OFF; +} + +static int pcie_dw_get_link_width(const void *regs_base) +{ + return (readl(regs_base + PCIE_LINK_STATUS_REG) & + PCIE_LINK_STATUS_WIDTH_MASK) >> PCIE_LINK_STATUS_WIDTH_OFF; +} + +/** + * pcie_dw_prog_outbound_atu() - Configure ATU for outbound accesses + * + * @pcie: Pointer to the PCI controller state + * @index: ATU region index + * @type: ATU accsess type + * @cpu_addr: the physical address for the translation entry + * @pci_addr: the pcie bus address for the translation entry + * @size: the size of the translation entry + */ +static void pcie_dw_prog_outbound_atu(struct pcie_dw_mvebu *pcie, int index, + int type, u64 cpu_addr, u64 pci_addr, + u32 size) +{ + writel(PCIE_ATU_REGION_OUTBOUND | index, + pcie->ctrl_base + PCIE_ATU_VIEWPORT); + writel(lower_32_bits(cpu_addr), pcie->ctrl_base + PCIE_ATU_LOWER_BASE); + writel(upper_32_bits(cpu_addr), pcie->ctrl_base + PCIE_ATU_UPPER_BASE); + writel(lower_32_bits(cpu_addr + size - 1), + pcie->ctrl_base + PCIE_ATU_LIMIT); + writel(lower_32_bits(pci_addr), + pcie->ctrl_base + PCIE_ATU_LOWER_TARGET); + writel(upper_32_bits(pci_addr), + pcie->ctrl_base + PCIE_ATU_UPPER_TARGET); + writel(type, pcie->ctrl_base + PCIE_ATU_CR1); + writel(PCIE_ATU_ENABLE, pcie->ctrl_base + PCIE_ATU_CR2); +} + +/** + * set_cfg_address() - Configure the PCIe controller config space access + * + * @pcie: Pointer to the PCI controller state + * @d: PCI device to access + * @where: Offset in the configuration space + * + * Configures the PCIe controller to access the configuration space of + * a specific PCIe device and returns the address to use for this + * access. 
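+ *
+ * The root port's own configuration space is reached directly through
+ * the control register space. Any other bus is reached by reprogramming
+ * outbound iATU region 0, using CFG0 TLPs for the bus immediately below
+ * the root port and CFG1 TLPs for buses further downstream.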
+ * + * Return: Address that can be used to access the configation space + * of the requested device / offset + */ +static uintptr_t set_cfg_address(struct pcie_dw_mvebu *pcie, + pci_dev_t d, uint where) +{ + uintptr_t va_address; + u32 atu_type; + + /* + * Region #0 is used for Outbound CFG space access. + * Direction = Outbound + * Region Index = 0 + */ + + if (PCI_BUS(d) == (pcie->first_busno + 1)) + /* For local bus, change TLP Type field to 4. */ + atu_type = PCIE_ATU_TYPE_CFG0; + else + /* Otherwise, change TLP Type field to 5. */ + atu_type = PCIE_ATU_TYPE_CFG1; + + if (PCI_BUS(d) == pcie->first_busno) { + /* Accessing root port configuration space. */ + va_address = (uintptr_t)pcie->ctrl_base; + } else { + d = PCI_MASK_BUS(d) | (PCI_BUS(d) - pcie->first_busno); + pcie_dw_prog_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, + atu_type, (u64)pcie->cfg_base, + d << 8, pcie->cfg_size); + va_address = (uintptr_t)pcie->cfg_base; + } + + va_address += where & ~0x3; + + return va_address; +} + +/** + * pcie_dw_addr_valid() - Check for valid bus address + * + * @d: The PCI device to access + * @first_busno: Bus number of the PCIe controller root complex + * + * Return 1 (true) if the PCI device can be accessed by this controller. + * + * Return: 1 on valid, 0 on invalid + */ +static int pcie_dw_addr_valid(pci_dev_t d, int first_busno) +{ + if ((PCI_BUS(d) == first_busno) && (PCI_DEV(d) > 0)) + return 0; + if ((PCI_BUS(d) == first_busno + 1) && (PCI_DEV(d) > 0)) + return 0; + + return 1; +} + +/** + * pcie_dw_mvebu_read_config() - Read from configuration space + * + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @valuep: A pointer at which to store the read value + * @size: Indicates the size of access to perform + * + * Read a value of size @size from offset @offset within the configuration + * space of the device identified by the bus, device & function numbers in @bdf + * on the PCI bus @bus. + * + * Return: 0 on success + */ +static int pcie_dw_mvebu_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct pcie_dw_mvebu *pcie = dev_get_priv(bus); + uintptr_t va_address; + ulong value; + + debug("PCIE CFG read: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + + if (!pcie_dw_addr_valid(bdf, pcie->first_busno)) { + debug("- out of range\n"); + *valuep = pci_get_ff(size); + return 0; + } + + va_address = set_cfg_address(pcie, bdf, offset); + + value = readl(va_address); + + debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value); + *valuep = pci_conv_32_to_size(value, offset, size); + + if (pcie->region_count > 1) + pcie_dw_prog_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pcie->io.phys_start, + pcie->io.bus_start, pcie->io.size); + + return 0; +} + +/** + * pcie_dw_mvebu_write_config() - Write to configuration space + * + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @value: The value to write + * @size: Indicates the size of access to perform + * + * Write the value @value of size @size from offset @offset within the + * configuration space of the device identified by the bus, device & function + * numbers in @bdf on the PCI bus @bus. 
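+ *
+ * Sub-word writes are performed as a read-modify-write of the
+ * containing 32-bit register. When more than one PCI region is
+ * configured, outbound iATU region 0 is restored to I/O translation
+ * after the access.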
+ * + * Return: 0 on success + */ +static int pcie_dw_mvebu_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct pcie_dw_mvebu *pcie = dev_get_priv(bus); + uintptr_t va_address; + ulong old; + + debug("PCIE CFG write: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value); + + if (!pcie_dw_addr_valid(bdf, pcie->first_busno)) { + debug("- out of range\n"); + return 0; + } + + va_address = set_cfg_address(pcie, bdf, offset); + + old = readl(va_address); + value = pci_conv_size_to_32(old, value, offset, size); + writel(value, va_address); + + if (pcie->region_count > 1) + pcie_dw_prog_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pcie->io.phys_start, + pcie->io.bus_start, pcie->io.size); + + return 0; +} + +/** + * pcie_dw_configure() - Configure link capabilities and speed + * + * @regs_base: A pointer to the PCIe controller registers + * @cap_speed: The capabilities and speed to configure + * + * Configure the link capabilities and speed in the PCIe root complex. + */ +static void pcie_dw_configure(const void *regs_base, u32 cap_speed) +{ + /* + * TODO (shadi@marvell.com, sr@denx.de): + * Need to read the serdes speed from the dts and according to it + * configure the PCIe gen + */ + + /* Set link to GEN 3 */ + clrsetbits_le32(regs_base + PCIE_LINK_CTL_2, + TARGET_LINK_SPEED_MASK, cap_speed); + clrsetbits_le32(regs_base + PCIE_LINK_CAPABILITY, + TARGET_LINK_SPEED_MASK, cap_speed); + setbits_le32(regs_base + PCIE_GEN3_EQU_CTRL, GEN3_EQU_EVAL_2MS_DISABLE); +} + +/** + * is_link_up() - Return the link state + * + * @regs_base: A pointer to the PCIe controller registers + * + * Return: 1 (true) for active line and 0 (false) for no link + */ +static int is_link_up(const void *regs_base) +{ + u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; + u32 reg; + + reg = readl(regs_base + PCIE_GLOBAL_STATUS); + if ((reg & mask) == mask) + return 1; + + return 0; +} + +/** + * wait_link_up() - Wait for the link to come up + * + * @regs_base: A pointer to the PCIe controller registers + * + * Return: 1 (true) for active line and 0 (false) for no link (timeout) + */ +static int wait_link_up(const void *regs_base) +{ + unsigned long timeout; + + timeout = get_timer(0) + PCIE_LINK_UP_TIMEOUT_MS; + while (!is_link_up(regs_base)) { + if (get_timer(0) > timeout) + return 0; + }; + + return 1; +} + +/** + * pcie_dw_mvebu_pcie_link_up() - Configure the PCIe root port + * + * @regs_base: A pointer to the PCIe controller registers + * @cap_speed: The capabilities and speed to configure + * + * Configure the PCIe controller root complex depending on the + * requested link capabilities and speed. 
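+ *
+ * If the link is not already up, the LTSSM is halted while the port is
+ * switched to root-complex mode and the AXI cache attributes are set,
+ * then restarted once the link configuration has been written.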
+ * + * Return: 1 (true) for active line and 0 (false) for no link + */ +static int pcie_dw_mvebu_pcie_link_up(const void *regs_base, u32 cap_speed) +{ + if (!is_link_up(regs_base)) { + /* Disable LTSSM state machine to enable configuration */ + clrbits_le32(regs_base + PCIE_GLOBAL_CONTROL, + PCIE_APP_LTSSM_EN); + } + + clrsetbits_le32(regs_base + PCIE_GLOBAL_CONTROL, + PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_OFFSET, + PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_OFFSET); + + /* Set the PCIe master AXI attributes */ + writel(ARCACHE_SHAREABLE_CACHEABLE, regs_base + PCIE_ARCACHE_TRC); + writel(AWCACHE_SHAREABLE_CACHEABLE, regs_base + PCIE_AWCACHE_TRC); + + /* DW pre link configurations */ + pcie_dw_configure(regs_base, cap_speed); + + if (!is_link_up(regs_base)) { + /* Configuration done. Start LTSSM */ + setbits_le32(regs_base + PCIE_GLOBAL_CONTROL, + PCIE_APP_LTSSM_EN); + } + + /* Check that link was established */ + if (!wait_link_up(regs_base)) + return 0; + + /* + * Link can be established in Gen 1. still need to wait + * till MAC nagaotiation is completed + */ + udelay(100); + + return 1; +} + +/** + * pcie_dw_set_host_bars() - Configure the host BARs + * + * @regs_base: A pointer to the PCIe controller registers + * + * Configure the host BARs of the PCIe controller root port so that + * PCI(e) devices may access the system memory. + */ +static void pcie_dw_set_host_bars(const void *regs_base) +{ + u32 size = gd->ram_size; + u64 max_size; + u32 reg; + u32 bar0; + + /* Verify the maximal BAR size */ + reg = readl(regs_base + RESIZABLE_BAR_CAP); + max_size = 1ULL << (5 + (reg + (1 << 4))); + + if (size > max_size) { + size = max_size; + printf("Warning: PCIe BARs can't map all DRAM space\n"); + } + + /* Set the BAR base and size towards DDR */ + bar0 = CFG_SYS_SDRAM_BASE & ~0xf; + bar0 |= PCI_BASE_ADDRESS_MEM_TYPE_32; + writel(CFG_SYS_SDRAM_BASE, regs_base + PCIE_CONFIG_BAR0); + + reg = ((size >> 20) - 1) << 12; + writel(size, regs_base + RESIZABLE_BAR_CTL0); +} + +/** + * pcie_dw_mvebu_probe() - Probe the PCIe bus for active link + * + * @dev: A pointer to the device being operated on + * + * Probe for an active link on the PCIe bus and configure the controller + * to enable this port. + * + * Return: 0 on success, else -ENODEV + */ +static int pcie_dw_mvebu_probe(struct udevice *dev) +{ + struct pcie_dw_mvebu *pcie = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); +#if CONFIG_IS_ENABLED(DM_GPIO) + struct gpio_desc reset_gpio; + + gpio_request_by_name(dev, "marvell,reset-gpio", 0, &reset_gpio, + GPIOD_IS_OUT); + /* + * Issue reset to add-in card trough the dedicated GPIO. + * Some boards are connecting the card reset pin to common system + * reset wire and others are using separate GPIO port. + * In the last case we have to release a reset of the addon card + * using this GPIO. 
+ */ + if (dm_gpio_is_valid(&reset_gpio)) { + dm_gpio_set_value(&reset_gpio, 1); /* assert */ + mdelay(200); + dm_gpio_set_value(&reset_gpio, 0); /* de-assert */ + mdelay(200); + } +#else + debug("PCIE Reset on GPIO support is missing\n"); +#endif /* DM_GPIO */ + + pcie->first_busno = dev_seq(dev); + + /* Don't register host if link is down */ + if (!pcie_dw_mvebu_pcie_link_up(pcie->ctrl_base, LINK_SPEED_GEN_3)) { + printf("PCIE-%d: Link down\n", dev_seq(dev)); + } else { + printf("PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n", dev_seq(dev), + pcie_dw_get_link_speed(pcie->ctrl_base), + pcie_dw_get_link_width(pcie->ctrl_base), + hose->first_busno); + } + + pcie->region_count = hose->region_count - CONFIG_NR_DRAM_BANKS; + + /* Store the IO and MEM windows settings for future use by the ATU */ + if (pcie->region_count > 1) { + /* IO base */ + pcie->io.phys_start = hose->regions[0].phys_start; + /* IO_bus_addr */ + pcie->io.bus_start = hose->regions[0].bus_start; + /* IO size */ + pcie->io.size = hose->regions[0].size; + } + + /* MEM base */ + pcie->mem.phys_start = hose->regions[pcie->region_count - 1].phys_start; + /* MEM_bus_addr */ + pcie->mem.bus_start = hose->regions[pcie->region_count - 1].bus_start; + /* MEM size */ + pcie->mem.size = hose->regions[pcie->region_count - 1].size; + + pcie_dw_prog_outbound_atu(pcie, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_MEM, pcie->mem.phys_start, + pcie->mem.bus_start, pcie->mem.size); + + /* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI_NORMAL */ + clrsetbits_le32(pcie->ctrl_base + PCI_CLASS_REVISION, + 0xffffff << 8, PCI_CLASS_BRIDGE_PCI_NORMAL << 8); + + pcie_dw_set_host_bars(pcie->ctrl_base); + + return 0; +} + +/** + * pcie_dw_mvebu_of_to_plat() - Translate from DT to device state + * + * @dev: A pointer to the device being operated on + * + * Translate relevant data from the device tree pertaining to device @dev into + * state that the driver will later make use of. This state is stored in the + * device's private data structure. + * + * Return: 0 on success, else -EINVAL + */ +static int pcie_dw_mvebu_of_to_plat(struct udevice *dev) +{ + struct pcie_dw_mvebu *pcie = dev_get_priv(dev); + + /* Get the controller base address */ + pcie->ctrl_base = devfdt_get_addr_index_ptr(dev, 0); + if (!pcie->ctrl_base) + return -EINVAL; + + /* Get the config space base address and size */ + pcie->cfg_base = devfdt_get_addr_size_index_ptr(dev, 1, + &pcie->cfg_size); + if (!pcie->cfg_base) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops pcie_dw_mvebu_ops = { + .read_config = pcie_dw_mvebu_read_config, + .write_config = pcie_dw_mvebu_write_config, +}; + +static const struct udevice_id pcie_dw_mvebu_ids[] = { + { .compatible = "marvell,armada8k-pcie" }, + { } +}; + +U_BOOT_DRIVER(pcie_dw_mvebu) = { + .name = "pcie_dw_mvebu", + .id = UCLASS_PCI, + .of_match = pcie_dw_mvebu_ids, + .ops = &pcie_dw_mvebu_ops, + .of_to_plat = pcie_dw_mvebu_of_to_plat, + .probe = pcie_dw_mvebu_probe, + .priv_auto = sizeof(struct pcie_dw_mvebu), +}; diff --git a/drivers/pci/pcie_dw_rockchip.c b/drivers/pci/pcie_dw_rockchip.c new file mode 100644 index 00000000000..1bad51fb3eb --- /dev/null +++ b/drivers/pci/pcie_dw_rockchip.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip DesignWare based PCIe host controller driver + * + * Copyright (c) 2021 Rockchip, Inc. 
+ */ + +#include <clk.h> +#include <dm.h> +#include <generic-phy.h> +#include <pci.h> +#include <power-domain.h> +#include <reset.h> +#include <syscon.h> +#include <asm/arch-rockchip/clock.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <dm/device_compat.h> +#include <linux/bitfield.h> +#include <linux/iopoll.h> +#include <linux/delay.h> +#include <power/regulator.h> + +#include "pcie_dw_common.h" + +DECLARE_GLOBAL_DATA_PTR; + +/** + * struct rk_pcie - RK DW PCIe controller state + * + * @vpcie3v3: The 3.3v power supply for slot + * @apb_base: The base address of vendor regs + * @rst_gpio: The #PERST signal for slot + */ +struct rk_pcie { + /* Must be first member of the struct */ + struct pcie_dw dw; + struct udevice *vpcie3v3; + void *apb_base; + struct phy phy; + struct clk_bulk clks; + struct reset_ctl_bulk rsts; + struct gpio_desc rst_gpio; + u32 gen; + u32 num_lanes; +}; + +/* Parameters for the waiting for iATU enabled routine */ +#define PCIE_CLIENT_GENERAL_DEBUG 0x104 +#define PCIE_CLIENT_HOT_RESET_CTRL 0x180 +#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) +#define PCIE_CLIENT_LTSSM_STATUS 0x300 +#define SMLH_LINKUP BIT(16) +#define RDLH_LINKUP BIT(17) +#define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310 +#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320 +#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324 +#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0 0x328 +#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c +#define PCIE_CLIENT_DBG_FIFO_STATUS 0x350 +#define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000 +#define PCIE_CLIENT_DBF_EN 0xffff0003 + +#define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000 + +static int rk_pcie_read(void __iomem *addr, int size, u32 *val) +{ + if ((uintptr_t)addr & (size - 1)) { + *val = 0; + return -EOPNOTSUPP; + } + + if (size == 4) { + *val = readl(addr); + } else if (size == 2) { + *val = readw(addr); + } else if (size == 1) { + *val = readb(addr); + } else { + *val = 0; + return -ENODEV; + } + + return 0; +} + +static int rk_pcie_write(void __iomem *addr, int size, u32 val) +{ + if ((uintptr_t)addr & (size - 1)) + return -EOPNOTSUPP; + + if (size == 4) + writel(val, addr); + else if (size == 2) + writew(val, addr); + else if (size == 1) + writeb(val, addr); + else + return -ENODEV; + + return 0; +} + +static u32 __rk_pcie_read_apb(struct rk_pcie *rk_pcie, void __iomem *base, + u32 reg, size_t size) +{ + int ret; + u32 val; + + ret = rk_pcie_read(base + reg, size, &val); + if (ret) + dev_err(rk_pcie->dw.dev, "Read APB address failed\n"); + + return val; +} + +static void __rk_pcie_write_apb(struct rk_pcie *rk_pcie, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + int ret; + + ret = rk_pcie_write(base + reg, size, val); + if (ret) + dev_err(rk_pcie->dw.dev, "Write APB address failed\n"); +} + +/** + * rk_pcie_readl_apb() - Read vendor regs + * + * @rk_pcie: Pointer to the PCI controller state + * @reg: Offset of regs + */ +static inline u32 rk_pcie_readl_apb(struct rk_pcie *rk_pcie, u32 reg) +{ + return __rk_pcie_read_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4); +} + +/** + * rk_pcie_writel_apb() - Write vendor regs + * + * @rk_pcie: Pointer to the PCI controller state + * @reg: Offset of regs + * @val: Value to be writen + */ +static inline void rk_pcie_writel_apb(struct rk_pcie *rk_pcie, u32 reg, + u32 val) +{ + __rk_pcie_write_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4, val); +} + +/** + * rk_pcie_configure() - Configure link capabilities and speed + * + * @rk_pcie: Pointer to the PCI controller state + * + * Configure the link capabilities and 
speed in the PCIe root complex. + */ +static void rk_pcie_configure(struct rk_pcie *pci) +{ + u32 val; + + dw_pcie_dbi_write_enable(&pci->dw, true); + + /* Disable BAR 0 and BAR 1 */ + writel(0, pci->dw.dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET + + PCI_BASE_ADDRESS_0); + writel(0, pci->dw.dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET + + PCI_BASE_ADDRESS_1); + + clrsetbits_le32(pci->dw.dbi_base + PCIE_LINK_CAPABILITY, + TARGET_LINK_SPEED_MASK, pci->gen); + + clrsetbits_le32(pci->dw.dbi_base + PCIE_LINK_CTL_2, + TARGET_LINK_SPEED_MASK, pci->gen); + + /* Set the number of lanes */ + val = readl(pci->dw.dbi_base + PCIE_PORT_LINK_CONTROL); + val &= ~PORT_LINK_FAST_LINK_MODE; + val |= PORT_LINK_DLL_LINK_EN; + val &= ~PORT_LINK_MODE_MASK; + switch (pci->num_lanes) { + case 1: + val |= PORT_LINK_MODE_1_LANES; + break; + case 2: + val |= PORT_LINK_MODE_2_LANES; + break; + case 4: + val |= PORT_LINK_MODE_4_LANES; + break; + default: + dev_err(pci->dw.dev, "num-lanes %u: invalid value\n", pci->num_lanes); + goto out; + } + writel(val, pci->dw.dbi_base + PCIE_PORT_LINK_CONTROL); + + /* Set link width speed control register */ + val = readl(pci->dw.dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + val &= ~PORT_LOGIC_LINK_WIDTH_MASK; + switch (pci->num_lanes) { + case 1: + val |= PORT_LOGIC_LINK_WIDTH_1_LANES; + break; + case 2: + val |= PORT_LOGIC_LINK_WIDTH_2_LANES; + break; + case 4: + val |= PORT_LOGIC_LINK_WIDTH_4_LANES; + break; + } + writel(val, pci->dw.dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + +out: + dw_pcie_dbi_write_enable(&pci->dw, false); +} + +static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie) +{ + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0, + PCIE_CLIENT_DBG_TRANSITION_DATA); + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1, + PCIE_CLIENT_DBG_TRANSITION_DATA); + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0, + PCIE_CLIENT_DBG_TRANSITION_DATA); + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1, + PCIE_CLIENT_DBG_TRANSITION_DATA); + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON, + PCIE_CLIENT_DBF_EN); +} + +static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie) +{ + u32 loop; + + debug("ltssm = 0x%x\n", + rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS)); + for (loop = 0; loop < 64; loop++) + debug("fifo_status = 0x%x\n", + rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_STATUS)); +} + +static inline void rk_pcie_link_status_clear(struct rk_pcie *rk_pcie) +{ + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG, 0x0); +} + +static inline void rk_pcie_disable_ltssm(struct rk_pcie *rk_pcie) +{ + rk_pcie_writel_apb(rk_pcie, 0x0, 0xc0008); +} + +static inline void rk_pcie_enable_ltssm(struct rk_pcie *rk_pcie) +{ + rk_pcie_writel_apb(rk_pcie, 0x0, 0xc000c); +} + +static int is_link_up(struct rk_pcie *priv) +{ + u32 val; + + val = rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS); + if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000 && + (val & GENMASK(5, 0)) == 0x11) + return 1; + + return 0; +} + +/** + * rk_pcie_link_up() - Wait for the link to come up + * + * @rk_pcie: Pointer to the PCI controller state + * + * Return: 1 (true) for active line and negetive (false) for no link (timeout) + */ +static int rk_pcie_link_up(struct rk_pcie *priv) +{ + int retries; + + if (is_link_up(priv)) { + printf("PCI Link already up before configuration!\n"); + return 1; + } + + /* DW pre link configurations */ + rk_pcie_configure(priv); + + rk_pcie_disable_ltssm(priv); + rk_pcie_link_status_clear(priv); + rk_pcie_enable_debug(priv); + + /* 
Reset the device */ + if (dm_gpio_is_valid(&priv->rst_gpio)) + dm_gpio_set_value(&priv->rst_gpio, 0); + + /* Enable LTSSM */ + rk_pcie_enable_ltssm(priv); + + /* + * PCIe requires the refclk to be stable for 100ms prior to releasing + * PERST. See table 2-4 in section 2.6.2 AC Specifications of the PCI + * Express Card Electromechanical Specification, 1.1. However, we don't + * know if the refclk is coming from RC's PHY or external OSC. If it's + * from RC, so enabling LTSSM is the just right place to release #PERST. + */ + mdelay(100); + if (dm_gpio_is_valid(&priv->rst_gpio)) + dm_gpio_set_value(&priv->rst_gpio, 1); + + /* Check if the link is up or not */ + for (retries = 0; retries < 10; retries++) { + if (is_link_up(priv)) + break; + + mdelay(100); + } + + if (retries >= 10) { + dev_err(priv->dw.dev, "PCIe-%d Link Fail\n", + dev_seq(priv->dw.dev)); + return -EIO; + } + + dev_info(priv->dw.dev, "PCIe Link up, LTSSM is 0x%x\n", + rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS)); + rk_pcie_debug_dump(priv); + return 0; +} + +static int rockchip_pcie_init_port(struct udevice *dev) +{ + int ret; + u32 val; + struct rk_pcie *priv = dev_get_priv(dev); + + ret = reset_assert_bulk(&priv->rsts); + if (ret) { + dev_err(dev, "failed to assert resets (ret=%d)\n", ret); + return ret; + } + + /* Set power and maybe external ref clk input */ + ret = regulator_set_enable_if_allowed(priv->vpcie3v3, true); + if (ret && ret != -ENOSYS) { + dev_err(dev, "failed to enable vpcie3v3 (ret=%d)\n", ret); + return ret; + } + + ret = generic_phy_init(&priv->phy); + if (ret) { + dev_err(dev, "failed to init phy (ret=%d)\n", ret); + goto err_disable_regulator; + } + + ret = generic_phy_power_on(&priv->phy); + if (ret) { + dev_err(dev, "failed to power on phy (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = reset_deassert_bulk(&priv->rsts); + if (ret) { + dev_err(dev, "failed to deassert resets (ret=%d)\n", ret); + goto err_power_off_phy; + } + + ret = clk_enable_bulk(&priv->clks); + if (ret) { + dev_err(dev, "failed to enable clks (ret=%d)\n", ret); + goto err_deassert_bulk; + } + + /* LTSSM EN ctrl mode */ + val = rk_pcie_readl_apb(priv, PCIE_CLIENT_HOT_RESET_CTRL); + val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16); + rk_pcie_writel_apb(priv, PCIE_CLIENT_HOT_RESET_CTRL, val); + + /* Set RC mode */ + rk_pcie_writel_apb(priv, 0x0, 0xf00040); + pcie_dw_setup_host(&priv->dw); + + ret = rk_pcie_link_up(priv); + if (ret < 0) + goto err_link_up; + + return 0; +err_link_up: + clk_disable_bulk(&priv->clks); +err_deassert_bulk: + reset_assert_bulk(&priv->rsts); +err_power_off_phy: + generic_phy_power_off(&priv->phy); +err_exit_phy: + generic_phy_exit(&priv->phy); +err_disable_regulator: + regulator_set_enable_if_allowed(priv->vpcie3v3, false); + + return ret; +} + +static int rockchip_pcie_parse_dt(struct udevice *dev) +{ + struct rk_pcie *priv = dev_get_priv(dev); + int ret; + + priv->dw.dbi_base = dev_read_addr_index_ptr(dev, 0); + if (!priv->dw.dbi_base) + return -EINVAL; + + dev_dbg(dev, "DBI address is 0x%p\n", priv->dw.dbi_base); + + priv->apb_base = dev_read_addr_index_ptr(dev, 1); + if (!priv->apb_base) + return -EINVAL; + + dev_dbg(dev, "APB address is 0x%p\n", priv->apb_base); + + priv->dw.cfg_base = dev_read_addr_size_index_ptr(dev, 2, + &priv->dw.cfg_size); + if (!priv->dw.cfg_base) + return -EINVAL; + + dev_dbg(dev, "CFG address is 0x%p\n", priv->dw.cfg_base); + + ret = gpio_request_by_name(dev, "reset-gpios", 0, + &priv->rst_gpio, GPIOD_IS_OUT); + if (ret) { + dev_err(dev, "failed to 
find reset-gpios property\n"); + return ret; + } + + ret = reset_get_bulk(dev, &priv->rsts); + if (ret) { + dev_err(dev, "Can't get reset: %d\n", ret); + goto rockchip_pcie_parse_dt_err_reset_get_bulk; + } + + ret = clk_get_bulk(dev, &priv->clks); + if (ret) { + dev_err(dev, "Can't get clock: %d\n", ret); + goto rockchip_pcie_parse_dt_err_clk_get_bulk; + } + + ret = device_get_supply_regulator(dev, "vpcie3v3-supply", + &priv->vpcie3v3); + if (ret && ret != -ENOENT) { + dev_err(dev, "failed to get vpcie3v3 supply (ret=%d)\n", ret); + goto rockchip_pcie_parse_dt_err_supply_regulator; + } + + ret = generic_phy_get_by_index(dev, 0, &priv->phy); + if (ret) { + dev_err(dev, "failed to get pcie phy (ret=%d)\n", ret); + goto rockchip_pcie_parse_dt_err_phy_get_by_index; + } + + priv->gen = dev_read_u32_default(dev, "max-link-speed", + LINK_SPEED_GEN_3); + + priv->num_lanes = dev_read_u32_default(dev, "num-lanes", 1); + + return 0; + +rockchip_pcie_parse_dt_err_phy_get_by_index: + /* regulators don't need release */ +rockchip_pcie_parse_dt_err_supply_regulator: + clk_release_bulk(&priv->clks); +rockchip_pcie_parse_dt_err_clk_get_bulk: + reset_release_bulk(&priv->rsts); +rockchip_pcie_parse_dt_err_reset_get_bulk: + dm_gpio_free(dev, &priv->rst_gpio); + return ret; +} + +/** + * rockchip_pcie_probe() - Probe the PCIe bus for active link + * + * @dev: A pointer to the device being operated on + * + * Probe for an active link on the PCIe bus and configure the controller + * to enable this port. + * + * Return: 0 on success, else -ENODEV + */ +static int rockchip_pcie_probe(struct udevice *dev) +{ + struct rk_pcie *priv = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + int ret = 0; + + priv->dw.first_busno = dev_seq(dev); + priv->dw.dev = dev; + + ret = rockchip_pcie_parse_dt(dev); + if (ret) + return ret; + + ret = rockchip_pcie_init_port(dev); + if (ret) + goto rockchip_pcie_probe_err_init_port; + + dev_info(dev, "PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n", + dev_seq(dev), pcie_dw_get_link_speed(&priv->dw), + pcie_dw_get_link_width(&priv->dw), + hose->first_busno); + + + ret = pcie_dw_prog_outbound_atu_unroll(&priv->dw, + PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_MEM, + priv->dw.mem.phys_start, + priv->dw.mem.bus_start, + priv->dw.mem.size); + if (!ret) + return ret; + +rockchip_pcie_probe_err_init_port: + clk_release_bulk(&priv->clks); + reset_release_bulk(&priv->rsts); + dm_gpio_free(dev, &priv->rst_gpio); + + return ret; +} + +static const struct dm_pci_ops rockchip_pcie_ops = { + .read_config = pcie_dw_read_config, + .write_config = pcie_dw_write_config, +}; + +static const struct udevice_id rockchip_pcie_ids[] = { + { .compatible = "rockchip,rk3568-pcie" }, + { .compatible = "rockchip,rk3588-pcie" }, + { } +}; + +U_BOOT_DRIVER(rockchip_dw_pcie) = { + .name = "pcie_dw_rockchip", + .id = UCLASS_PCI, + .of_match = rockchip_pcie_ids, + .ops = &rockchip_pcie_ops, + .probe = rockchip_pcie_probe, + .priv_auto = sizeof(struct rk_pcie), +}; diff --git a/drivers/pci/pcie_dw_sifive.c b/drivers/pci/pcie_dw_sifive.c new file mode 100644 index 00000000000..6285edf4b03 --- /dev/null +++ b/drivers/pci/pcie_dw_sifive.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SiFive FU740 DesignWare PCIe Controller + * + * Copyright (C) 2020-2021 SiFive, Inc. 
+ * + * Based in early part on the i.MX6 PCIe host controller shim which is: + * + * Copyright (C) 2013 Kosagi + * http://www.kosagi.com + * + * Based on driver from author: Alan Mikhak <amikhak@wirelessfabric.com> + */ +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <clk.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <generic-phy.h> +#include <linux/bitops.h> +#include <linux/log2.h> +#include <pci.h> +#include <pci_ep.h> +#include <pci_ids.h> +#include <regmap.h> +#include <reset.h> +#include <syscon.h> + +#include "pcie_dw_common.h" + +struct pcie_sifive { + /* Must be first member of the struct */ + struct pcie_dw dw; + + /* private control regs */ + void __iomem *priv_base; + + /* reset, power, clock resources */ + int sys_int_pin; + struct gpio_desc pwren_gpio; + struct gpio_desc reset_gpio; + struct clk aux_ck; + struct reset_ctl reset; +}; + +enum pcie_sifive_devtype { + SV_PCIE_UNKNOWN_TYPE = 0, + SV_PCIE_ENDPOINT_TYPE = 1, + SV_PCIE_HOST_TYPE = 3 +}; + +#define ASSERTION_DELAY 100 +#define PCIE_PERST_ASSERT 0x0 +#define PCIE_PERST_DEASSERT 0x1 +#define PCIE_PHY_RESET 0x1 +#define PCIE_PHY_RESET_DEASSERT 0x0 +#define GPIO_LOW 0x0 +#define GPIO_HIGH 0x1 +#define PCIE_PHY_SEL 0x1 + +#define sv_info(sv, fmt, arg...) printf(fmt, ## arg) +#define sv_warn(sv, fmt, arg...) printf(fmt, ## arg) +#define sv_debug(sv, fmt, arg...) debug(fmt, ## arg) +#define sv_err(sv, fmt, arg...) printf(fmt, ## arg) + +/* Doorbell Interface */ +#define DBI_OFFSET 0x0 +#define DBI_SIZE 0x1000 + +#define PL_OFFSET 0x700 + +#define PHY_DEBUG_R0 (PL_OFFSET + 0x28) + +#define PHY_DEBUG_R1 (PL_OFFSET + 0x2c) +#define PHY_DEBUG_R1_LINK_UP (0x1 << 4) +#define PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) + +#define PCIE_MISC_CONTROL_1 0x8bc +#define DBI_RO_WR_EN BIT(0) + +/* pcie reset */ +#define PCIEX8MGMT_PERST_N 0x0 + +/* LTSSM */ +#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10 +#define LTSSM_ENABLE_BIT BIT(0) + +/* phy reset */ +#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18 + +/* device type */ +#define PCIEX8MGMT_DEVICE_TYPE 0x708 +#define DEVICE_TYPE_EP 0x0 +#define DEVICE_TYPE_RC 0x4 + +/* phy control registers*/ +#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860 +#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870 +#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878 +#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880 +#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888 +#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890 +#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898 +#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0 +#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0 +#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8 +#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0 +#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8 +#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0 +#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8 + +#define PCIEX8MGMT_LANE_NUM 8 +#define PCIEX8MGMT_LANE 0x1008 +#define PCIEX8MGMT_LANE_OFF 0x100 +#define PCIEX8MGMT_TERM_MODE 0x0e21 + +#define PCIE_CAP_BASE 0x70 +#define PCI_CONFIG(r) (DBI_OFFSET + (r)) +#define PCIE_CAPABILITIES(r) PCI_CONFIG(PCIE_CAP_BASE + (r)) + +/* Link capability */ +#define PF0_PCIE_CAP_LINK_CAP PCIE_CAPABILITIES(0xc) +#define PCIE_LINK_CAP_MAX_SPEED_MASK 0xf +#define PCIE_LINK_CAP_MAX_SPEED_GEN1 BIT(0) +#define PCIE_LINK_CAP_MAX_SPEED_GEN2 BIT(1) +#define PCIE_LINK_CAP_MAX_SPEED_GEN3 BIT(2) +#define PCIE_LINK_CAP_MAX_SPEED_GEN4 BIT(3) + +static enum pcie_sifive_devtype pcie_sifive_get_devtype(struct pcie_sifive *sv) +{ + u32 val; + + val = readl(sv->priv_base + PCIEX8MGMT_DEVICE_TYPE); + switch (val) { + case DEVICE_TYPE_RC: + return 
SV_PCIE_HOST_TYPE; + case DEVICE_TYPE_EP: + return SV_PCIE_ENDPOINT_TYPE; + default: + return SV_PCIE_UNKNOWN_TYPE; + } +} + +static void pcie_sifive_priv_set_state(struct pcie_sifive *sv, u32 reg, + u32 bits, int state) +{ + u32 val; + + val = readl(sv->priv_base + reg); + val = state ? (val | bits) : (val & !bits); + writel(val, sv->priv_base + reg); +} + +static void pcie_sifive_assert_reset(struct pcie_sifive *sv) +{ + dm_gpio_set_value(&sv->reset_gpio, GPIO_LOW); + writel(PCIE_PERST_ASSERT, sv->priv_base + PCIEX8MGMT_PERST_N); + mdelay(ASSERTION_DELAY); +} + +static void pcie_sifive_power_on(struct pcie_sifive *sv) +{ + dm_gpio_set_value(&sv->pwren_gpio, GPIO_HIGH); + mdelay(ASSERTION_DELAY); +} + +static void pcie_sifive_deassert_reset(struct pcie_sifive *sv) +{ + writel(PCIE_PERST_DEASSERT, sv->priv_base + PCIEX8MGMT_PERST_N); + dm_gpio_set_value(&sv->reset_gpio, GPIO_HIGH); + mdelay(ASSERTION_DELAY); +} + +static int pcie_sifive_setphy(const u8 phy, const u8 write, + const u16 addr, const u16 wrdata, + u16 *rddata, struct pcie_sifive *sv) +{ + unsigned char ack = 0; + + if (!(phy == 0 || phy == 1)) + return -2; + + /* setup phy para */ + writel(addr, sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_ADDR : + PCIEX8MGMT_PHY0_CR_PARA_ADDR)); + + if (write) + writel(wrdata, sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_WR_DATA : + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA)); + + /* enable access if write */ + if (write) + writel(1, sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_WR_EN : + PCIEX8MGMT_PHY0_CR_PARA_WR_EN)); + else + writel(1, sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_RD_EN : + PCIEX8MGMT_PHY0_CR_PARA_RD_EN)); + + /* wait for wait_idle */ + do { + u32 val; + + val = readl(sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_ACK : + PCIEX8MGMT_PHY0_CR_PARA_ACK)); + if (val) { + ack = 1; + if (!write) + readl(sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_RD_DATA : + PCIEX8MGMT_PHY0_CR_PARA_RD_DATA)); + mdelay(1); + } + } while (!ack); + + /* clear */ + if (write) + writel(0, sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_WR_EN : + PCIEX8MGMT_PHY0_CR_PARA_WR_EN)); + else + writel(0, sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_RD_EN : + PCIEX8MGMT_PHY0_CR_PARA_RD_EN)); + + while (readl(sv->priv_base + + (phy ? PCIEX8MGMT_PHY1_CR_PARA_ACK : + PCIEX8MGMT_PHY0_CR_PARA_ACK))) { + /* wait for ~wait_idle */ + } + + return 0; +} + +static void pcie_sifive_init_phy(struct pcie_sifive *sv) +{ + int lane; + + /* enable phy cr_para_sel interfaces */ + writel(PCIE_PHY_SEL, sv->priv_base + PCIEX8MGMT_PHY0_CR_PARA_SEL); + writel(PCIE_PHY_SEL, sv->priv_base + PCIEX8MGMT_PHY1_CR_PARA_SEL); + mdelay(1); + + /* set PHY AC termination mode */ + for (lane = 0; lane < PCIEX8MGMT_LANE_NUM; lane++) { + pcie_sifive_setphy(0, 1, + PCIEX8MGMT_LANE + + (PCIEX8MGMT_LANE_OFF * lane), + PCIEX8MGMT_TERM_MODE, NULL, sv); + pcie_sifive_setphy(1, 1, + PCIEX8MGMT_LANE + + (PCIEX8MGMT_LANE_OFF * lane), + PCIEX8MGMT_TERM_MODE, NULL, sv); + } +} + +static int pcie_sifive_check_link(struct pcie_sifive *sv) +{ + u32 val; + + val = readl(sv->dw.dbi_base + PHY_DEBUG_R1); + return (val & PHY_DEBUG_R1_LINK_UP) && + !(val & PHY_DEBUG_R1_LINK_IN_TRAINING); +} + +static void pcie_sifive_force_gen1(struct pcie_sifive *sv) +{ + u32 val, linkcap; + + /* + * Force Gen1 operation when starting the link. In case the link is + * started in Gen2 mode, there is a possibility the devices on the + * bus will not be detected at all. This happens with PCIe switches. 
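+ *
+ * Note: the Link Capabilities field touched below is read-only through
+ * the DBI by default; the write only takes effect because DBI_RO_WR_EN
+ * in PCIE_MISC_CONTROL_1 is set first and cleared again afterwards.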
+ */ + + /* ctrl_ro_wr_enable */ + val = readl(sv->dw.dbi_base + PCIE_MISC_CONTROL_1); + val |= DBI_RO_WR_EN; + writel(val, sv->dw.dbi_base + PCIE_MISC_CONTROL_1); + + /* configure link cap */ + linkcap = readl(sv->dw.dbi_base + PF0_PCIE_CAP_LINK_CAP); + linkcap |= PCIE_LINK_CAP_MAX_SPEED_MASK; + writel(linkcap, sv->dw.dbi_base + PF0_PCIE_CAP_LINK_CAP); + + /* ctrl_ro_wr_disable */ + val &= ~DBI_RO_WR_EN; + writel(val, sv->dw.dbi_base + PCIE_MISC_CONTROL_1); +} + +static void pcie_sifive_print_phy_debug(struct pcie_sifive *sv) +{ + sv_err(sv, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", + readl(sv->dw.dbi_base + PHY_DEBUG_R0), + readl(sv->dw.dbi_base + PHY_DEBUG_R1)); +} + +static int pcie_sifive_wait_for_link(struct pcie_sifive *sv) +{ + u32 val; + int timeout; + + /* Wait for the link to train */ + mdelay(20); + timeout = 20; + + do { + mdelay(1); + } while (--timeout && !pcie_sifive_check_link(sv)); + + val = readl(sv->dw.dbi_base + PHY_DEBUG_R1); + if (!(val & PHY_DEBUG_R1_LINK_UP) || + (val & PHY_DEBUG_R1_LINK_IN_TRAINING)) { + sv_info(sv, "Failed to negotiate PCIe link!\n"); + pcie_sifive_print_phy_debug(sv); + writel(PCIE_PHY_RESET, + sv->priv_base + PCIEX8MGMT_APP_HOLD_PHY_RST); + return -ETIMEDOUT; + } + + return 0; +} + +static int pcie_sifive_start_link(struct pcie_sifive *sv) +{ + if (pcie_sifive_check_link(sv)) + return -EALREADY; + + pcie_sifive_force_gen1(sv); + + /* set ltssm */ + pcie_sifive_priv_set_state(sv, PCIEX8MGMT_APP_LTSSM_ENABLE, + LTSSM_ENABLE_BIT, 1); + return 0; +} + +static int pcie_sifive_init_port(struct udevice *dev, + enum pcie_sifive_devtype mode) +{ + struct pcie_sifive *sv = dev_get_priv(dev); + int ret; + + /* Power on reset */ + pcie_sifive_assert_reset(sv); + pcie_sifive_power_on(sv); + pcie_sifive_deassert_reset(sv); + + /* Enable pcieauxclk */ + ret = clk_enable(&sv->aux_ck); + if (ret) + dev_err(dev, "unable to enable pcie_aux clock\n"); + + /* + * assert hold_phy_rst (hold the controller LTSSM in reset + * after power_up_rst_n for register programming with cr_para) + */ + writel(PCIE_PHY_RESET, sv->priv_base + PCIEX8MGMT_APP_HOLD_PHY_RST); + + /* deassert power_up_rst_n */ + ret = reset_deassert(&sv->reset); + if (ret < 0) { + dev_err(dev, "failed to deassert reset"); + return -EINVAL; + } + + pcie_sifive_init_phy(sv); + + /* disable pcieauxclk */ + clk_disable(&sv->aux_ck); + + /* deassert hold_phy_rst */ + writel(PCIE_PHY_RESET_DEASSERT, + sv->priv_base + PCIEX8MGMT_APP_HOLD_PHY_RST); + + /* enable pcieauxclk */ + clk_enable(&sv->aux_ck); + + /* Set desired mode while core is not operational */ + if (mode == SV_PCIE_HOST_TYPE) + writel(DEVICE_TYPE_RC, + sv->priv_base + PCIEX8MGMT_DEVICE_TYPE); + else + writel(DEVICE_TYPE_EP, + sv->priv_base + PCIEX8MGMT_DEVICE_TYPE); + + /* Confirm desired mode from operational core */ + if (pcie_sifive_get_devtype(sv) != mode) + return -EINVAL; + + pcie_dw_setup_host(&sv->dw); + + if (pcie_sifive_start_link(sv) == -EALREADY) + sv_info(sv, "PCIe link is already up\n"); + else if (pcie_sifive_wait_for_link(sv) == -ETIMEDOUT) + return -ETIMEDOUT; + + return 0; +} + +static int pcie_sifive_probe(struct udevice *dev) +{ + struct pcie_sifive *sv = dev_get_priv(dev); + struct udevice *parent = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(parent); + int err; + + sv->dw.first_busno = dev_seq(dev); + sv->dw.dev = dev; + + err = pcie_sifive_init_port(dev, SV_PCIE_HOST_TYPE); + if (err) { + sv_info(sv, "Failed to init port.\n"); + return err; + } + + printf("PCIE-%d: Link up (Gen%d-x%d, 
Bus%d)\n", + dev_seq(dev), pcie_dw_get_link_speed(&sv->dw), + pcie_dw_get_link_width(&sv->dw), + hose->first_busno); + + return pcie_dw_prog_outbound_atu_unroll(&sv->dw, + PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_MEM, + sv->dw.mem.phys_start, + sv->dw.mem.bus_start, + sv->dw.mem.size); +} + +static void __iomem *get_fdt_addr(struct udevice *dev, const char *name) +{ + fdt_addr_t addr; + + addr = dev_read_addr_name(dev, name); + + return (addr == FDT_ADDR_T_NONE) ? NULL : (void __iomem *)addr; +} + +static int pcie_sifive_of_to_plat(struct udevice *dev) +{ + struct pcie_sifive *sv = dev_get_priv(dev); + int err; + + /* get designware DBI base addr */ + sv->dw.dbi_base = get_fdt_addr(dev, "dbi"); + if (!sv->dw.dbi_base) + return -EINVAL; + + /* get private control base addr */ + sv->priv_base = get_fdt_addr(dev, "mgmt"); + if (!sv->priv_base) + return -EINVAL; + + gpio_request_by_name(dev, "pwren-gpios", 0, &sv->pwren_gpio, + GPIOD_IS_OUT); + + if (!dm_gpio_is_valid(&sv->pwren_gpio)) { + sv_info(sv, "pwren_gpio is invalid\n"); + return -EINVAL; + } + + gpio_request_by_name(dev, "reset-gpios", 0, &sv->reset_gpio, + GPIOD_IS_OUT); + + if (!dm_gpio_is_valid(&sv->reset_gpio)) { + sv_info(sv, "reset_gpio is invalid\n"); + return -EINVAL; + } + + err = clk_get_by_index(dev, 0, &sv->aux_ck); + if (err) { + sv_info(sv, "clk_get_by_index(aux_ck) failed: %d\n", err); + return err; + } + + err = reset_get_by_index(dev, 0, &sv->reset); + if (err) { + sv_info(sv, "reset_get_by_index(reset) failed: %d\n", err); + return err; + } + + return 0; +} + +static const struct dm_pci_ops pcie_sifive_ops = { + .read_config = pcie_dw_read_config, + .write_config = pcie_dw_write_config, +}; + +static const struct udevice_id pcie_sifive_ids[] = { + { .compatible = "sifive,fu740-pcie" }, + {} +}; + +U_BOOT_DRIVER(pcie_sifive) = { + .name = "pcie_sifive", + .id = UCLASS_PCI, + .of_match = pcie_sifive_ids, + .ops = &pcie_sifive_ops, + .of_to_plat = pcie_sifive_of_to_plat, + .probe = pcie_sifive_probe, + .priv_auto = sizeof(struct pcie_sifive), +}; diff --git a/drivers/pci/pcie_dw_ti.c b/drivers/pci/pcie_dw_ti.c new file mode 100644 index 00000000000..78a5d035865 --- /dev/null +++ b/drivers/pci/pcie_dw_ti.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Texas Instruments, Inc + */ + +#include <dm.h> +#include <log.h> +#include <pci.h> +#include <generic-phy.h> +#include <power-domain.h> +#include <regmap.h> +#include <syscon.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <dm/device_compat.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/err.h> + +#include "pcie_dw_common.h" + +DECLARE_GLOBAL_DATA_PTR; + +#define PCIE_VENDORID_MASK GENMASK(15, 0) +#define PCIE_DEVICEID_SHIFT 16 + +#define PCIE_LINK_CAPABILITY 0x7c +#define PCIE_LINK_CTL_2 0xa0 +#define TARGET_LINK_SPEED_MASK 0xf +#define LINK_SPEED_GEN_1 0x1 +#define LINK_SPEED_GEN_2 0x2 +#define LINK_SPEED_GEN_3 0x3 + +#define PCIE_MISC_CONTROL_1_OFF 0x8bc +#define PCIE_DBI_RO_WR_EN BIT(0) + +#define PLR_OFFSET 0x700 +#define PCIE_PORT_DEBUG0 (PLR_OFFSET + 0x28) +#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f +#define PORT_LOGIC_LTSSM_STATE_L0 0x11 + +#define PCIE_LINK_UP_TIMEOUT_MS 100 + +/* Offsets from App base */ +#define PCIE_CMD_STATUS 0x04 +#define LTSSM_EN_VAL BIT(0) + + +#define AM654_PCIE_DEV_TYPE_MASK 0x3 +#define EP 0x0 +#define LEG_EP 0x1 +#define RC 0x2 + +/** + * struct pcie_dw_ti - TI DW PCIe controller state + * + * @pci: The common PCIe DW structure + * 
@app_base: The base address of application register space + */ +struct pcie_dw_ti { + /* Must be first member of the struct */ + struct pcie_dw dw; + void *app_base; +}; + +enum dw_pcie_device_mode { + DW_PCIE_UNKNOWN_TYPE, + DW_PCIE_EP_TYPE, + DW_PCIE_LEG_EP_TYPE, + DW_PCIE_RC_TYPE, +}; + +/** + * pcie_dw_configure() - Configure link capabilities and speed + * + * @regs_base: A pointer to the PCIe controller registers + * @cap_speed: The capabilities and speed to configure + * + * Configure the link capabilities and speed in the PCIe root complex. + */ +static void pcie_dw_configure(struct pcie_dw_ti *pci, u32 cap_speed) +{ + u32 val; + + dw_pcie_dbi_write_enable(&pci->dw, true); + + val = readl(pci->dw.dbi_base + PCIE_LINK_CAPABILITY); + val &= ~TARGET_LINK_SPEED_MASK; + val |= cap_speed; + writel(val, pci->dw.dbi_base + PCIE_LINK_CAPABILITY); + + val = readl(pci->dw.dbi_base + PCIE_LINK_CTL_2); + val &= ~TARGET_LINK_SPEED_MASK; + val |= cap_speed; + writel(val, pci->dw.dbi_base + PCIE_LINK_CTL_2); + + dw_pcie_dbi_write_enable(&pci->dw, false); +} + +/** + * is_link_up() - Return the link state + * + * @regs_base: A pointer to the PCIe DBICS registers + * + * Return: 1 (true) for active line and 0 (false) for no link + */ +static int is_link_up(struct pcie_dw_ti *pci) +{ + u32 val; + + val = readl(pci->dw.dbi_base + PCIE_PORT_DEBUG0); + val &= PORT_LOGIC_LTSSM_STATE_MASK; + + return (val == PORT_LOGIC_LTSSM_STATE_L0); +} + +/** + * wait_link_up() - Wait for the link to come up + * + * @regs_base: A pointer to the PCIe controller registers + * + * Return: 1 (true) for active line and 0 (false) for no link (timeout) + */ +static int wait_link_up(struct pcie_dw_ti *pci) +{ + unsigned long timeout; + + timeout = get_timer(0) + PCIE_LINK_UP_TIMEOUT_MS; + while (!is_link_up(pci)) { + if (get_timer(0) > timeout) + return 0; + }; + + return 1; +} + +static int pcie_dw_ti_pcie_link_up(struct pcie_dw_ti *pci, u32 cap_speed) +{ + u32 val; + + if (is_link_up(pci)) { + printf("PCI Link already up before configuration!\n"); + return 1; + } + + /* DW pre link configurations */ + pcie_dw_configure(pci, cap_speed); + + /* Initiate link training */ + val = readl(pci->app_base + PCIE_CMD_STATUS); + val |= LTSSM_EN_VAL; + writel(val, pci->app_base + PCIE_CMD_STATUS); + + /* Check that link was established */ + if (!wait_link_up(pci)) + return 0; + + /* + * Link can be established in Gen 1. 
still need to wait + * till MAC nagaotiation is completed + */ + udelay(100); + + return 1; +} + +static int pcie_am654_set_mode(struct pcie_dw_ti *pci, + enum dw_pcie_device_mode mode) +{ + struct regmap *syscon; + u32 val; + u32 mask; + int ret; + + syscon = syscon_regmap_lookup_by_phandle(pci->dw.dev, + "ti,syscon-pcie-mode"); + if (IS_ERR(syscon)) + return 0; + + mask = AM654_PCIE_DEV_TYPE_MASK; + + switch (mode) { + case DW_PCIE_RC_TYPE: + val = RC; + break; + case DW_PCIE_EP_TYPE: + val = EP; + break; + default: + dev_err(pci->dw.dev, "INVALID device type %d\n", mode); + return -EINVAL; + } + + ret = regmap_update_bits(syscon, 0, mask, val); + if (ret) { + dev_err(pci->dw.dev, "failed to set pcie mode\n"); + return ret; + } + + return 0; +} + +static int pcie_dw_init_id(struct pcie_dw_ti *pci) +{ + struct regmap *devctrl_regs; + unsigned int id; + int ret; + + devctrl_regs = syscon_regmap_lookup_by_phandle(pci->dw.dev, + "ti,syscon-pcie-id"); + if (IS_ERR(devctrl_regs)) + return PTR_ERR(devctrl_regs); + + ret = regmap_read(devctrl_regs, 0, &id); + if (ret) + return ret; + + dw_pcie_dbi_write_enable(&pci->dw, true); + writew(id & PCIE_VENDORID_MASK, pci->dw.dbi_base + PCI_VENDOR_ID); + writew(id >> PCIE_DEVICEID_SHIFT, pci->dw.dbi_base + PCI_DEVICE_ID); + dw_pcie_dbi_write_enable(&pci->dw, false); + + return 0; +} + +/** + * pcie_dw_ti_probe() - Probe the PCIe bus for active link + * + * @dev: A pointer to the device being operated on + * + * Probe for an active link on the PCIe bus and configure the controller + * to enable this port. + * + * Return: 0 on success, else -ENODEV + */ +static int pcie_dw_ti_probe(struct udevice *dev) +{ + struct pcie_dw_ti *pci = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + struct power_domain pci_pwrdmn; + struct phy phy0, phy1; + int ret; + + ret = power_domain_get_by_index(dev, &pci_pwrdmn, 0); + if (ret) { + dev_err(dev, "failed to get power domain\n"); + return ret; + } + + ret = power_domain_on(&pci_pwrdmn); + if (ret) { + dev_err(dev, "Power domain on failed\n"); + return ret; + } + + ret = generic_phy_get_by_name(dev, "pcie-phy0", &phy0); + if (ret) { + dev_err(dev, "Unable to get phy0"); + return ret; + } + generic_phy_reset(&phy0); + generic_phy_init(&phy0); + generic_phy_power_on(&phy0); + + ret = generic_phy_get_by_name(dev, "pcie-phy1", &phy1); + if (ret) { + dev_err(dev, "Unable to get phy1"); + return ret; + } + generic_phy_reset(&phy1); + generic_phy_init(&phy1); + generic_phy_power_on(&phy1); + + pci->dw.first_busno = dev_seq(dev); + pci->dw.dev = dev; + + pcie_dw_setup_host(&pci->dw); + pcie_dw_init_id(pci); + + if (device_is_compatible(dev, "ti,am654-pcie-rc")) + pcie_am654_set_mode(pci, DW_PCIE_RC_TYPE); + + if (!pcie_dw_ti_pcie_link_up(pci, LINK_SPEED_GEN_2)) { + printf("PCIE-%d: Link down\n", dev_seq(dev)); + return -ENODEV; + } + + printf("PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n", dev_seq(dev), + pcie_dw_get_link_speed(&pci->dw), + pcie_dw_get_link_width(&pci->dw), + hose->first_busno); + + pcie_dw_prog_outbound_atu_unroll(&pci->dw, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_MEM, + pci->dw.mem.phys_start, + pci->dw.mem.bus_start, pci->dw.mem.size); + + return 0; +} + +/** + * pcie_dw_ti_of_to_plat() - Translate from DT to device state + * + * @dev: A pointer to the device being operated on + * + * Translate relevant data from the device tree pertaining to device @dev into + * state that the driver will later make use of. 
This state is stored in the + * device's private data structure. + * + * Return: 0 on success, else -EINVAL + */ +static int pcie_dw_ti_of_to_plat(struct udevice *dev) +{ + struct pcie_dw_ti *pcie = dev_get_priv(dev); + + /* Get the controller base address */ + pcie->dw.dbi_base = (void *)dev_read_addr_name(dev, "dbics"); + if ((fdt_addr_t)pcie->dw.dbi_base == FDT_ADDR_T_NONE) + return -EINVAL; + + /* Get the config space base address and size */ + pcie->dw.cfg_base = (void *)dev_read_addr_size_name(dev, "config", + &pcie->dw.cfg_size); + if ((fdt_addr_t)pcie->dw.cfg_base == FDT_ADDR_T_NONE) + return -EINVAL; + + /* Get the iATU base address and size */ + pcie->dw.atu_base = (void *)dev_read_addr_name(dev, "atu"); + if ((fdt_addr_t)pcie->dw.atu_base == FDT_ADDR_T_NONE) + return -EINVAL; + + /* Get the app base address and size */ + pcie->app_base = (void *)dev_read_addr_name(dev, "app"); + if ((fdt_addr_t)pcie->app_base == FDT_ADDR_T_NONE) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops pcie_dw_ti_ops = { + .read_config = pcie_dw_read_config, + .write_config = pcie_dw_write_config, +}; + +static const struct udevice_id pcie_dw_ti_ids[] = { + { .compatible = "ti,am654-pcie-rc" }, + { } +}; + +U_BOOT_DRIVER(pcie_dw_ti) = { + .name = "pcie_dw_ti", + .id = UCLASS_PCI, + .of_match = pcie_dw_ti_ids, + .ops = &pcie_dw_ti_ops, + .of_to_plat = pcie_dw_ti_of_to_plat, + .probe = pcie_dw_ti_probe, + .priv_auto = sizeof(struct pcie_dw_ti), +}; diff --git a/drivers/pci/pcie_ecam_generic.c b/drivers/pci/pcie_ecam_generic.c new file mode 100644 index 00000000000..3cb2bbbccb4 --- /dev/null +++ b/drivers/pci/pcie_ecam_generic.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic PCIE host provided by e.g. QEMU + * + * Heavily based on drivers/pci/pcie_xilinx.c + * + * Copyright (C) 2016 Imagination Technologies + */ + +#include <dm.h> +#include <pci.h> +#include <linux/ioport.h> +#include <linux/printk.h> + +#include <asm/io.h> + +#define TYPE_PCI 0x1 + +/** + * struct generic_ecam_pcie - generic_ecam PCIe controller state + * @cfg_base: The base address of memory mapped configuration space + */ +struct generic_ecam_pcie { + void *cfg_base; + pci_size_t size; + int first_busno; +}; + +/** + * pci_generic_ecam_conf_address() - Calculate the address of a config access + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @paddress: Pointer to the pointer to write the calculates address to + * + * Calculates the address that should be accessed to perform a PCIe + * configuration space access for a given device identified by the PCIe + * controller device @pcie and the bus, device & function numbers in @bdf. If + * access to the device is not valid then the function will return an error + * code. Otherwise the address to access will be written to the pointer pointed + * to by @paddress. 
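+ *
+ * As a worked example (assuming pcie->first_busno is 0): bus 2, device 3,
+ * function 1, offset 0x24 maps to cfg_base + 0x219024 in ECAM mode
+ * ((bus << 20) | (dev << 15) | (fn << 12) | offset), and to
+ * cfg_base + 0x21924 in plain CAM mode
+ * ((bus << 16) | (dev << 11) | (fn << 8) | offset).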
+ */ +static int pci_generic_ecam_conf_address(const struct udevice *bus, + pci_dev_t bdf, uint offset, + void **paddress) +{ + struct generic_ecam_pcie *pcie = dev_get_priv(bus); + void *addr; + + addr = pcie->cfg_base; + + if (dev_get_driver_data(bus) == TYPE_PCI) { + addr += ((PCI_BUS(bdf) - pcie->first_busno) << 16) | + (PCI_DEV(bdf) << 11) | (PCI_FUNC(bdf) << 8) | offset; + } else { + addr += PCIE_ECAM_OFFSET(PCI_BUS(bdf) - pcie->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + } + *paddress = addr; + + return 0; +} + +static bool pci_generic_ecam_addr_valid(const struct udevice *bus, + pci_dev_t bdf) +{ + struct generic_ecam_pcie *pcie = dev_get_priv(bus); + int num_buses = DIV_ROUND_UP(pcie->size, 1 << 16); + + return (PCI_BUS(bdf) >= pcie->first_busno && + PCI_BUS(bdf) < pcie->first_busno + num_buses); +} + +/** + * pci_generic_ecam_read_config() - Read from configuration space + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @valuep: A pointer at which to store the read value + * @size: Indicates the size of access to perform + * + * Read a value of size @size from offset @offset within the configuration + * space of the device identified by the bus, device & function numbers in @bdf + * on the PCI bus @bus. + */ +static int pci_generic_ecam_read_config(const struct udevice *bus, + pci_dev_t bdf, uint offset, + ulong *valuep, enum pci_size_t size) +{ + if (!pci_generic_ecam_addr_valid(bus, bdf)) { + *valuep = pci_get_ff(size); + return 0; + } + + return pci_generic_mmap_read_config(bus, pci_generic_ecam_conf_address, + bdf, offset, valuep, size); +} + +/** + * pci_generic_ecam_write_config() - Write to configuration space + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @value: The value to write + * @size: Indicates the size of access to perform + * + * Write the value @value of size @size from offset @offset within the + * configuration space of the device identified by the bus, device & function + * numbers in @bdf on the PCI bus @bus. + */ +static int pci_generic_ecam_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + if (!pci_generic_ecam_addr_valid(bus, bdf)) + return 0; + + return pci_generic_mmap_write_config(bus, pci_generic_ecam_conf_address, + bdf, offset, value, size); +} + +/** + * pci_generic_ecam_of_to_plat() - Translate from DT to device state + * @dev: A pointer to the device being operated on + * + * Translate relevant data from the device tree pertaining to device @dev into + * state that the driver will later make use of. This state is stored in the + * device's private data structure. 
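+ *
+ * The first "reg" entry provides the base address and size of the memory
+ * mapped configuration window; the size is also what
+ * pci_generic_ecam_addr_valid() later uses to derive the number of
+ * addressable buses.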
+ * + * Return: 0 on success, else -EINVAL + */ +static int pci_generic_ecam_of_to_plat(struct udevice *dev) +{ + struct generic_ecam_pcie *pcie = dev_get_priv(dev); + ofnode node = dev_ofnode(dev); + struct resource reg_res; + int err; + + err = ofnode_read_resource(node, 0, ®_res); + if (err < 0) { + pr_err("\"reg\" resource not found\n"); + return err; + } + + pcie->size = resource_size(®_res); + pcie->cfg_base = map_physmem(reg_res.start, pcie->size, MAP_NOCACHE); + + return 0; +} + +static int pci_generic_ecam_probe(struct udevice *dev) +{ + struct generic_ecam_pcie *pcie = dev_get_priv(dev); + + pcie->first_busno = dev_seq(dev); + + return 0; +} + +static const struct dm_pci_ops pci_generic_ecam_ops = { + .read_config = pci_generic_ecam_read_config, + .write_config = pci_generic_ecam_write_config, +}; + +static const struct udevice_id pci_generic_ecam_ids[] = { + { .compatible = "pci-host-ecam-generic" /* PCI-E */ }, + { .compatible = "pci-host-cam-generic", .data = TYPE_PCI }, + { } +}; + +U_BOOT_DRIVER(pci_generic_ecam) = { + .name = "pci_generic_ecam", + .id = UCLASS_PCI, + .of_match = pci_generic_ecam_ids, + .ops = &pci_generic_ecam_ops, + .probe = pci_generic_ecam_probe, + .of_to_plat = pci_generic_ecam_of_to_plat, + .priv_auto = sizeof(struct generic_ecam_pcie), +}; diff --git a/drivers/pci/pcie_ecam_synquacer.c b/drivers/pci/pcie_ecam_synquacer.c new file mode 100644 index 00000000000..fc855dfca4e --- /dev/null +++ b/drivers/pci/pcie_ecam_synquacer.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SynQuacer PCIE host driver + * + * Based on drivers/pci/pcie_ecam_generic.c + * + * Copyright (C) 2016 Imagination Technologies + * Copyright (C) 2021 Linaro Ltd. + */ + +#include <dm.h> +#include <pci.h> +#include <log.h> + +#include <asm/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> + +/* iATU registers */ +#define IATU_VIEWPORT_OFF 0x900 +#define IATU_VIEWPORT_INBOUND BIT(31) +#define IATU_VIEWPORT_OUTBOUND 0 +#define IATU_VIEWPORT_REGION_INDEX(idx) ((idx) & 7) + +#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0 0x904 +#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM 0x0 +#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO 0x2 +#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0 0x4 +#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1 0x5 +#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH BIT(12) + +#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0 0x908 +#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN BIT(31) +#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE BIT(28) +#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT 0xF +#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_64BIT 0xFF + +#define IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 0x90C +#define IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 0x910 +#define IATU_LIMIT_ADDR_OFF_OUTBOUND_0 0x914 +#define IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 0x918 +#define IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 0x91C + +/* Clock and resets */ +#define CORE_CONTROL 0x000 +#define APP_LTSSM_ENABLE BIT(4) +#define DEVICE_TYPE (BIT(3) | BIT(2) | BIT(1) | BIT(0)) + +#define AXI_CLK_STOP 0x004 +#define DBI_ACLK_STOP BIT(8) +#define SLV_ACLK_STOP BIT(4) +#define MSTR_ACLK_STOP BIT(0) +#define DBI_CSYSREQ_REG BIT(9) +#define SLV_CSYSREQ_REG BIT(5) +#define MSTR_CSYSREQ_REG BIT(1) + +#define RESET_CONTROL_1 0x00C +#define PERST_N_O_REG BIT(5) +#define PERST_N_I_REG BIT(4) +#define BUTTON_RST_N_REG BIT(1) +#define PWUP_RST_N_REG BIT(0) + +#define RESET_CONTROL_2 0x010 + +#define RESET_SELECT_1 0x014 +#define SQU_RST_SEL BIT(29) +#define PHY_RST_SEL 
BIT(28) +#define PWR_RST_SEL BIT(24) +#define STI_RST_SEL BIT(20) +#define N_STI_RST_SEL BIT(16) +#define CORE_RST_SEL BIT(12) +#define PERST_SEL BIT(4) +#define BUTTON_RST_SEL BIT(1) +#define PWUP_RST_SEL BIT(0) + +#define RESET_SELECT_2 0x018 +#define DBI_ARST_SEL BIT(8) +#define SLV_ARST_SEL BIT(4) +#define MSTR_ARST_SEL BIT(0) + +#define EM_CONTROL 0x030 +#define PRE_DET_STT_REG BIT(4) + +#define EM_SELECT 0x034 +#define PRE_DET_STT_SEL BIT(4) + +#define PM_CONTROL_2 0x050 +#define SYS_AUX_PWR_DET BIT(8) + +#define PHY_CONFIG_COM_6 0x114 +#define PIPE_PORT_SEL GENMASK(1, 0) + +#define LINK_MONITOR 0x210 +#define SMLH_LINK_UP BIT(0) + +#define LINK_CAPABILITIES_REG 0x07C +#define PCIE_CAP_MAX_LINK_WIDTH GENMASK(7, 4) +#define PCIE_CAP_MAX_LINK_SPEED GENMASK(3, 0) + +#define LINK_CONTROL_LINK_STATUS_REG 0x080 +#define PCIE_CAP_NEGO_LINK_WIDTH GENMASK(23, 20) +#define PCIE_CAP_LINK_SPEED GENMASK(19, 16) + +#define TYPE1_CLASS_CODE_REV_ID_REG 0x008 +#define BASE_CLASS_CODE 0xFF000000 +#define BASE_CLASS_CODE_VALUE 0x06 +#define SUBCLASS_CODE 0x00FF0000 +#define SUBCLASS_CODE_VALUE 0x04 +#define PROGRAM_INTERFACE 0x0000FF00 +#define PROGRAM_INTERFACE_VALUE 0x00 + +#define GEN2_CONTROL_OFF 0x80c +#define DIRECT_SPEED_CHANGE BIT(17) + +#define MISC_CONTROL_1_OFF 0x8BC +#define DBI_RO_WR_EN BIT(0) + +static void or_writel(void *base, u32 offs, u32 val) +{ + writel(readl(base + offs) | val, base + offs); +} + +static void masked_writel(void *base, u32 offs, u32 mask, u32 val) +{ + u32 data; + int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */ + + if (val && shift > 1) + val <<= shift - 1; + + if (mask != ~0) + data = (readl(base + offs) & ~mask) | val; + else + data = val; + + writel(data, base + offs); +} + +static u32 masked_readl(void *base, u32 offs, u32 mask) +{ + u32 data; + int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */ + + data = readl(base + offs); + + if (mask != ~0) + data &= mask; + if (shift > 1) + data >>= shift - 1; + + return data; +} + +/* + * Since SynQuacer's PCIe RC is expected to be initialized in the + * firmware (including U-Boot), devicetree doesn't have control + * blocks. + * + * Thus, this will initialize the PCIe RC with fixed addresses. 
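+ *
+ * pci_synquacer_ecam_of_to_plat() matches the ECAM base from the "reg"
+ * property against the synquacer_pci_bases[] table below to select the
+ * DBI and EXS register windows belonging to the controller being probed.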
+ */ + +#define SYNQUACER_PCI_SEG0_CONFIG_BASE 0x60000000 +#define SYNQUACER_PCI_SEG0_CONFIG_SIZE 0x07f00000 +#define SYNQUACER_PCI_SEG0_DBI_BASE 0x583d0000 +#define SYNQUACER_PCI_SEG0_EXS_BASE 0x58390000 + +#define SYNQUACER_PCI_SEG1_CONFIG_BASE 0x70000000 +#define SYNQUACER_PCI_SEG1_CONFIG_SIZE 0x07f00000 +#define SYNQUACER_PCI_SEG1_DBI_BASE 0x583c0000 +#define SYNQUACER_PCI_SEG1_EXS_BASE 0x58380000 + +#define SIZE_16KB 0x00004000 +#define SIZE_64KB 0x00010000 +#define SIZE_1MB 0x00100000 + +#define SYNQUACER_PCI_DBI_SIZE SIZE_16KB +#define SYNQUACER_PCI_EXS_SIZE SIZE_64KB + +#define NUM_SQ_PCI_RC 2 + +static const struct synquacer_pcie_base { + phys_addr_t cfg_base; + phys_addr_t dbi_base; + phys_addr_t exs_base; +} synquacer_pci_bases[NUM_SQ_PCI_RC] = { + { + .cfg_base = SYNQUACER_PCI_SEG0_CONFIG_BASE, + .dbi_base = SYNQUACER_PCI_SEG0_DBI_BASE, + .exs_base = SYNQUACER_PCI_SEG0_EXS_BASE, + }, { + .cfg_base = SYNQUACER_PCI_SEG1_CONFIG_BASE, + .dbi_base = SYNQUACER_PCI_SEG1_DBI_BASE, + .exs_base = SYNQUACER_PCI_SEG1_EXS_BASE, + }, +}; + +/** + * struct synquacer_ecam_pcie - synquacer_ecam PCIe controller state + * @cfg_base: The base address of memory mapped configuration space + */ +struct synquacer_ecam_pcie { + void *cfg_base; + pci_size_t size; + void *dbi_base; + void *exs_base; + int first_busno; + + struct pci_region mem; + struct pci_region io; + struct pci_region mem64; +}; + +DECLARE_GLOBAL_DATA_PTR; + +/** + * pci_synquacer_ecam_conf_address() - Calculate the address of a config access + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @paddress: Pointer to the pointer to write the calculates address to + * + * Calculates the address that should be accessed to perform a PCIe + * configuration space access for a given device identified by the PCIe + * controller device @pcie and the bus, device & function numbers in @bdf. If + * access to the device is not valid then the function will return an error + * code. Otherwise the address to access will be written to the pointer pointed + * to by @paddress. + */ +static int pci_synquacer_ecam_conf_address(const struct udevice *bus, + pci_dev_t bdf, uint offset, + void **paddress) +{ + struct synquacer_ecam_pcie *pcie = dev_get_priv(bus); + void *addr; + + addr = pcie->cfg_base; + addr += PCIE_ECAM_OFFSET(PCI_BUS(bdf) - pcie->first_busno, + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + *paddress = addr; + + return 0; +} + +static bool pci_synquacer_ecam_addr_valid(const struct udevice *bus, + pci_dev_t bdf) +{ + struct synquacer_ecam_pcie *pcie = dev_get_priv(bus); + int num_buses = DIV_ROUND_UP(pcie->size, 1 << 16); + + /* + * The Synopsys DesignWare PCIe controller in ECAM mode will not filter + * type 0 config TLPs sent to devices 1 and up on its downstream port, + * resulting in devices appearing multiple times on bus 0 unless we + * filter out those accesses here. 
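+ *
+ * Without this filter a single endpoint behind the root port would also
+ * answer type 0 accesses for device numbers 1-31 on the root bus and be
+ * enumerated over and over again.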
+ */ + if (PCI_BUS(bdf) == pcie->first_busno && PCI_DEV(bdf) > 0) + return false; + + return (PCI_BUS(bdf) >= pcie->first_busno && + PCI_BUS(bdf) < pcie->first_busno + num_buses); +} + +/** + * pci_synquacer_ecam_read_config() - Read from configuration space + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @valuep: A pointer at which to store the read value + * @size: Indicates the size of access to perform + * + * Read a value of size @size from offset @offset within the configuration + * space of the device identified by the bus, device & function numbers in @bdf + * on the PCI bus @bus. + */ +static int pci_synquacer_ecam_read_config(const struct udevice *bus, + pci_dev_t bdf, uint offset, + ulong *valuep, enum pci_size_t size) +{ + if (!pci_synquacer_ecam_addr_valid(bus, bdf)) { + *valuep = pci_get_ff(size); + return 0; + } + + return pci_generic_mmap_read_config(bus, pci_synquacer_ecam_conf_address, + bdf, offset, valuep, size); +} + +/** + * pci_synquacer_ecam_write_config() - Write to configuration space + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @value: The value to write + * @size: Indicates the size of access to perform + * + * Write the value @value of size @size from offset @offset within the + * configuration space of the device identified by the bus, device & function + * numbers in @bdf on the PCI bus @bus. + */ +static int pci_synquacer_ecam_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + if (!pci_synquacer_ecam_addr_valid(bus, bdf)) + return 0; + + return pci_generic_mmap_write_config(bus, pci_synquacer_ecam_conf_address, + bdf, offset, value, size); +} + +/** + * pci_synquacer_ecam_of_to_plat() - Translate from DT to device state + * @dev: A pointer to the device being operated on + * + * Translate relevant data from the device tree pertaining to device @dev into + * state that the driver will later make use of. This state is stored in the + * device's private data structure. 
+ * + * Return: 0 on success, else -EINVAL + */ +static int pci_synquacer_ecam_of_to_plat(struct udevice *dev) +{ + struct synquacer_ecam_pcie *pcie = dev_get_priv(dev); + struct fdt_resource reg_res; + int i, err; + + debug("%s: called for %s\n", __func__, dev->name); + + err = fdt_get_resource(gd->fdt_blob, dev_of_offset(dev), "reg", + 0, ®_res); + if (err < 0) { + pr_err("\"reg\" resource not found\n"); + return err; + } + + /* Find the correct pair of the DBI/EXS base address */ + for (i = 0; i < NUM_SQ_PCI_RC; i++) { + if (synquacer_pci_bases[i].cfg_base == reg_res.start) + break; + } + if (i == NUM_SQ_PCI_RC) { + pr_err("Unknown ECAM base address %lx.\n", + (unsigned long)reg_res.start); + return -ENOENT; + } + pcie->dbi_base = map_physmem(synquacer_pci_bases[i].dbi_base, + SYNQUACER_PCI_DBI_SIZE, MAP_NOCACHE); + if (!pcie->dbi_base) { + pr_err("Failed to map DBI for %s\n", dev->name); + return -ENOMEM; + } + + pcie->exs_base = map_physmem(synquacer_pci_bases[i].exs_base, + SYNQUACER_PCI_EXS_SIZE, MAP_NOCACHE); + if (!pcie->exs_base) { + pr_err("Failed to map EXS for %s\n", dev->name); + return -ENOMEM; + } + + pcie->size = fdt_resource_size(®_res); + pcie->cfg_base = map_physmem(reg_res.start, pcie->size, MAP_NOCACHE); + if (!pcie->cfg_base) { + pr_err("Failed to map config space for %s\n", dev->name); + return -ENOMEM; + } + debug("mappings DBI: %p EXS: %p CFG: %p\n", pcie->dbi_base, pcie->exs_base, pcie->cfg_base); + + return 0; +} + +static void pci_synquacer_pre_init(struct synquacer_ecam_pcie *pcie) +{ + void *base = pcie->exs_base; + + masked_writel(base, EM_SELECT, PRE_DET_STT_SEL, 0); + masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 0); + masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 1); + + /* 1: Assert all PHY / LINK resets */ + masked_writel(base, RESET_SELECT_1, PERST_SEL, 0); + masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 0); + masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 0); + + /* Device Reset(PERST#) is effective afrer Set device_type (RC) */ + masked_writel(base, RESET_SELECT_1, PWUP_RST_SEL, 0); + masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 0); + masked_writel(base, RESET_SELECT_1, BUTTON_RST_SEL, 0); + masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 0); + masked_writel(base, RESET_SELECT_1, PWR_RST_SEL, 1); + masked_writel(base, RESET_SELECT_2, MSTR_ARST_SEL, 1); + masked_writel(base, RESET_SELECT_2, SLV_ARST_SEL, 1); + masked_writel(base, RESET_SELECT_2, DBI_ARST_SEL, 1); + masked_writel(base, RESET_SELECT_1, CORE_RST_SEL, 1); + masked_writel(base, RESET_SELECT_1, STI_RST_SEL, 1); + masked_writel(base, RESET_SELECT_1, N_STI_RST_SEL, 1); + masked_writel(base, RESET_SELECT_1, SQU_RST_SEL, 1); + masked_writel(base, RESET_SELECT_1, PHY_RST_SEL, 1); + + /* 2: Set P<n>_app_ltssm_enable='0' for reprogramming before linkup. 
*/ + masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 0); + + /* 3: Set device_type (RC) */ + masked_writel(base, CORE_CONTROL, DEVICE_TYPE, 4); +} + +static void pci_synquacer_dbi_init(void *dbi_base) +{ + masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 1); + /* 4 Lanes */ + masked_writel(dbi_base, LINK_CAPABILITIES_REG, + PCIE_CAP_MAX_LINK_WIDTH, 4); + /* Gen 2 */ + masked_writel(dbi_base, LINK_CAPABILITIES_REG, + PCIE_CAP_MAX_LINK_SPEED, 2); + + masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG, + BASE_CLASS_CODE, BASE_CLASS_CODE_VALUE); + masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG, + SUBCLASS_CODE, SUBCLASS_CODE_VALUE); + masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG, + PROGRAM_INTERFACE, PROGRAM_INTERFACE_VALUE); + + masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 0); +} + +static void pcie_sq_prog_outbound_atu(void *dbi_base, int index, + u64 cpu_base, u64 pci_base, u64 size, + u32 type, u32 flags) +{ + debug("%s: %p, %d, %llx, %llx, %llx, %x, %x\n", __func__, + dbi_base, index, cpu_base, pci_base, size, type, flags); + + writel(IATU_VIEWPORT_OUTBOUND | IATU_VIEWPORT_REGION_INDEX(index), + dbi_base + IATU_VIEWPORT_OFF); + + writel((u32)(cpu_base & 0xffffffff), + dbi_base + IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0); + writel((u32)(cpu_base >> 32), + dbi_base + IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0); + writel((u32)(cpu_base + size - 1), + dbi_base + IATU_LIMIT_ADDR_OFF_OUTBOUND_0); + + writel((u32)(pci_base & 0xffffffff), + dbi_base + IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0); + writel((u32)(pci_base >> 32), + dbi_base + IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0); + + writel(type, dbi_base + IATU_REGION_CTRL_1_OFF_OUTBOUND_0); + writel(IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN | flags, + dbi_base + IATU_REGION_CTRL_2_OFF_OUTBOUND_0); +} + +static void pci_synquacer_post_init(struct synquacer_ecam_pcie *pcie) +{ + void *base = pcie->exs_base; + + /* + * 4: Set Bifurcation 1=disable 4=able + * 5: Supply Reference (It has executed) + * 6: Wait for 10usec (Reference Clocks is stable) + * 7: De assert PERST# + */ + masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 1); + masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 1); + + /* 8: Assert SYS_AUX_PWR_DET */ + masked_writel(base, PM_CONTROL_2, SYS_AUX_PWR_DET, 1); + + /* 9: Supply following clocks */ + masked_writel(base, AXI_CLK_STOP, MSTR_CSYSREQ_REG, 1); + masked_writel(base, AXI_CLK_STOP, MSTR_ACLK_STOP, 0); + masked_writel(base, AXI_CLK_STOP, SLV_CSYSREQ_REG, 1); + masked_writel(base, AXI_CLK_STOP, SLV_ACLK_STOP, 0); + masked_writel(base, AXI_CLK_STOP, DBI_CSYSREQ_REG, 1); + masked_writel(base, AXI_CLK_STOP, DBI_ACLK_STOP, 0); + + /* + * 10: De assert PHY reset + * 11: De assert LINK's PMC reset + */ + masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 1); + masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 1); + + /* 12: PHY auto + * 13: Wrapper auto + * 14-17: PHY auto + * 18: Wrapper auto + * 19: Update registers through DBI AXI Slave interface + */ + pci_synquacer_dbi_init(pcie->dbi_base); + + or_writel(pcie->dbi_base, PCI_COMMAND, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + + /* Force link speed change to Gen2 at link up */ + or_writel(pcie->dbi_base, GEN2_CONTROL_OFF, DIRECT_SPEED_CHANGE); + + /* Region 0: MMIO32 range */ + pcie_sq_prog_outbound_atu(pcie->dbi_base, 0, + pcie->mem.phys_start, + pcie->mem.bus_start, + pcie->mem.size, + IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM | + IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH, + IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT); + + /* 
Region 1: Type 0 config space */ + pcie_sq_prog_outbound_atu(pcie->dbi_base, 1, + (u64)pcie->cfg_base, + 0, + SIZE_64KB, + IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0, + IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE); + + /* Region 2: Type 1 config space */ + pcie_sq_prog_outbound_atu(pcie->dbi_base, 2, + (u64)pcie->cfg_base + SIZE_64KB, + 0, + (u64)pcie->io.phys_start - (u64)pcie->cfg_base - SIZE_64KB, + IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1, + IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE); + + /* Region 3: port I/O range */ + pcie_sq_prog_outbound_atu(pcie->dbi_base, 3, + pcie->io.phys_start, + pcie->io.bus_start, + pcie->io.size, + IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO, + 0); + + /* Region 4: MMIO64 range */ + pcie_sq_prog_outbound_atu(pcie->dbi_base, 4, + pcie->mem64.phys_start, + pcie->mem64.bus_start, + pcie->mem64.size, + IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM | + IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH, + IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT); + + /* enable link */ + if (masked_readl(base, CORE_CONTROL, APP_LTSSM_ENABLE) == 0) + masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 1); +} + +static int pci_synquacer_ecam_probe(struct udevice *dev) +{ + struct synquacer_ecam_pcie *pcie = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + + debug("Probe synquacer pcie for bus %d\n", dev_seq(dev)); + pcie->first_busno = dev_seq(dev); + + /* Store the IO and MEM windows settings for configuring ATU */ + pcie->io.phys_start = hose->regions[0].phys_start; /* IO base */ + pcie->io.bus_start = hose->regions[0].bus_start; /* IO_bus_addr */ + pcie->io.size = hose->regions[0].size; /* IO size */ + + pcie->mem.phys_start = hose->regions[1].phys_start; /* MEM base */ + pcie->mem.bus_start = hose->regions[1].bus_start; /* MEM_bus_addr */ + pcie->mem.size = hose->regions[1].size; /* MEM size */ + + pcie->mem64.phys_start = hose->regions[2].phys_start; /* MEM64 base */ + pcie->mem64.bus_start = hose->regions[2].bus_start; /* MEM64_bus_addr */ + pcie->mem64.size = hose->regions[2].size; /* MEM64 size */ + + pci_synquacer_pre_init(pcie); + + mdelay(150); + + pci_synquacer_post_init(pcie); + + /* It takes a while to stabilize the PCIe bus for scanning */ + mdelay(100); + + return 0; +} + +static const struct dm_pci_ops pci_synquacer_ecam_ops = { + .read_config = pci_synquacer_ecam_read_config, + .write_config = pci_synquacer_ecam_write_config, +}; + +static const struct udevice_id pci_synquacer_ecam_ids[] = { + { .compatible = "socionext,synquacer-pcie-ecam" }, + { } +}; + +U_BOOT_DRIVER(pci_synquacer_ecam) = { + .name = "pci_synquacer_ecam", + .id = UCLASS_PCI, + .of_match = pci_synquacer_ecam_ids, + .ops = &pci_synquacer_ecam_ops, + .probe = pci_synquacer_ecam_probe, + .of_to_plat = pci_synquacer_ecam_of_to_plat, + .priv_auto = sizeof(struct synquacer_ecam_pcie), +}; diff --git a/drivers/pci/pcie_fsl.c b/drivers/pci/pcie_fsl.c new file mode 100644 index 00000000000..18af23c9504 --- /dev/null +++ b/drivers/pci/pcie_fsl.c @@ -0,0 +1,685 @@ +// SPDX-License-Identifier: GPL-2.0+ OR X11 +/* + * Copyright 2019 NXP + * + * PCIe DM U-Boot driver for Freescale PowerPC SoCs + * Author: Hou Zhiqiang <Zhiqiang.Hou@nxp.com> + */ + +#include <config.h> +#include <dm.h> +#include <malloc.h> +#include <mapmem.h> +#include <pci.h> +#include <asm/fsl_pci.h> +#include <asm/fsl_serdes.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <linux/delay.h> +#include <linux/printk.h> +#include "pcie_fsl.h" 
+#include <dm/device_compat.h> + +LIST_HEAD(fsl_pcie_list); + +static int fsl_pcie_link_up(struct fsl_pcie *pcie); + +static int fsl_pcie_addr_valid(struct fsl_pcie *pcie, pci_dev_t bdf) +{ + struct udevice *bus = pcie->bus; + + if (!pcie->enabled) + return -ENXIO; + + if (PCI_BUS(bdf) < dev_seq(bus)) + return -EINVAL; + + if (PCI_BUS(bdf) > dev_seq(bus) && (!fsl_pcie_link_up(pcie) || pcie->mode)) + return -EINVAL; + + if (PCI_BUS(bdf) == dev_seq(bus) && (PCI_DEV(bdf) > 0 || PCI_FUNC(bdf) > 0)) + return -EINVAL; + + if (PCI_BUS(bdf) == (dev_seq(bus) + 1) && (PCI_DEV(bdf) > 0)) + return -EINVAL; + + return 0; +} + +static int fsl_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct fsl_pcie *pcie = dev_get_priv(bus); + ccsr_fsl_pci_t *regs = pcie->regs; + u32 val; + + if (fsl_pcie_addr_valid(pcie, bdf)) { + *valuep = pci_get_ff(size); + return 0; + } + + /* Skip Freescale PCIe controller's PEXCSRBAR register */ + if (PCI_BUS(bdf) - dev_seq(bus) == 0 && + PCI_DEV(bdf) == 0 && PCI_FUNC(bdf) == 0 && + (offset & ~3) == PCI_BASE_ADDRESS_0) { + *valuep = 0; + return 0; + } + + val = PCI_CONF1_EXT_ADDRESS(PCI_BUS(bdf) - dev_seq(bus), + PCI_DEV(bdf), PCI_FUNC(bdf), + offset); + out_be32(®s->cfg_addr, val); + + sync(); + + switch (size) { + case PCI_SIZE_8: + *valuep = in_8((u8 *)®s->cfg_data + (offset & 3)); + break; + case PCI_SIZE_16: + *valuep = in_le16((u16 *)((u8 *)®s->cfg_data + + (offset & 2))); + break; + case PCI_SIZE_32: + *valuep = in_le32(®s->cfg_data); + break; + } + + return 0; +} + +static int fsl_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct fsl_pcie *pcie = dev_get_priv(bus); + ccsr_fsl_pci_t *regs = pcie->regs; + u32 val; + u8 val_8; + u16 val_16; + u32 val_32; + + if (fsl_pcie_addr_valid(pcie, bdf)) + return 0; + + /* Skip Freescale PCIe controller's PEXCSRBAR register */ + if (PCI_BUS(bdf) - dev_seq(bus) == 0 && + PCI_DEV(bdf) == 0 && PCI_FUNC(bdf) == 0 && + (offset & ~3) == PCI_BASE_ADDRESS_0) + return 0; + + val = PCI_CONF1_EXT_ADDRESS(PCI_BUS(bdf) - dev_seq(bus), + PCI_DEV(bdf), PCI_FUNC(bdf), + offset); + out_be32(®s->cfg_addr, val); + + sync(); + + switch (size) { + case PCI_SIZE_8: + val_8 = value; + out_8((u8 *)®s->cfg_data + (offset & 3), val_8); + break; + case PCI_SIZE_16: + val_16 = value; + out_le16((u16 *)((u8 *)®s->cfg_data + (offset & 2)), val_16); + break; + case PCI_SIZE_32: + val_32 = value; + out_le32(®s->cfg_data, val_32); + break; + } + + return 0; +} + +static int fsl_pcie_hose_read_config(struct fsl_pcie *pcie, uint offset, + ulong *valuep, enum pci_size_t size) +{ + int ret; + struct udevice *bus = pcie->bus; + + ret = fsl_pcie_read_config(bus, PCI_BDF(dev_seq(bus), 0, 0), + offset, valuep, size); + + return ret; +} + +static int fsl_pcie_hose_write_config(struct fsl_pcie *pcie, uint offset, + ulong value, enum pci_size_t size) +{ + struct udevice *bus = pcie->bus; + + return fsl_pcie_write_config(bus, PCI_BDF(dev_seq(bus), 0, 0), + offset, value, size); +} + +static int fsl_pcie_hose_read_config_byte(struct fsl_pcie *pcie, uint offset, + u8 *valuep) +{ + ulong val; + int ret; + + ret = fsl_pcie_hose_read_config(pcie, offset, &val, PCI_SIZE_8); + *valuep = val; + + return ret; +} + +static int fsl_pcie_hose_read_config_word(struct fsl_pcie *pcie, uint offset, + u16 *valuep) +{ + ulong val; + int ret; + + ret = fsl_pcie_hose_read_config(pcie, offset, &val, PCI_SIZE_16); + *valuep = val; + + return ret; +} + +static 
int fsl_pcie_hose_read_config_dword(struct fsl_pcie *pcie, uint offset, + u32 *valuep) +{ + ulong val; + int ret; + + ret = fsl_pcie_hose_read_config(pcie, offset, &val, PCI_SIZE_32); + *valuep = val; + + return ret; +} + +static int fsl_pcie_hose_write_config_byte(struct fsl_pcie *pcie, uint offset, + u8 value) +{ + return fsl_pcie_hose_write_config(pcie, offset, value, PCI_SIZE_8); +} + +static int fsl_pcie_hose_write_config_word(struct fsl_pcie *pcie, uint offset, + u16 value) +{ + return fsl_pcie_hose_write_config(pcie, offset, value, PCI_SIZE_16); +} + +static int fsl_pcie_hose_write_config_dword(struct fsl_pcie *pcie, uint offset, + u32 value) +{ + return fsl_pcie_hose_write_config(pcie, offset, value, PCI_SIZE_32); +} + +static int fsl_pcie_link_up(struct fsl_pcie *pcie) +{ + ccsr_fsl_pci_t *regs = pcie->regs; + u16 ltssm; + + if (pcie->block_rev >= PEX_IP_BLK_REV_3_0) { + ltssm = (in_be32(®s->pex_csr0) + & PEX_CSR0_LTSSM_MASK) >> PEX_CSR0_LTSSM_SHIFT; + return ltssm == LTSSM_L0_REV3; + } + + fsl_pcie_hose_read_config_word(pcie, PCI_LTSSM, <ssm); + + return ltssm == LTSSM_L0; +} + +static bool fsl_pcie_is_agent(struct fsl_pcie *pcie) +{ + u8 header_type; + + fsl_pcie_hose_read_config_byte(pcie, PCI_HEADER_TYPE, &header_type); + + return (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL; +} + +static int fsl_pcie_setup_law(struct fsl_pcie *pcie) +{ + struct pci_region *io, *mem, *pref; + + pci_get_regions(pcie->bus, &io, &mem, &pref); + + if (mem) + set_next_law(mem->phys_start, + law_size_bits(mem->size), + pcie->law_trgt_if); + + if (io) + set_next_law(io->phys_start, + law_size_bits(io->size), + pcie->law_trgt_if); + + return 0; +} + +static void fsl_pcie_config_ready(struct fsl_pcie *pcie) +{ + ccsr_fsl_pci_t *regs = pcie->regs; + + if (pcie->block_rev >= PEX_IP_BLK_REV_3_0) { + setbits_be32(®s->config, FSL_PCIE_V3_CFG_RDY); + return; + } + + fsl_pcie_hose_write_config_byte(pcie, FSL_PCIE_CFG_RDY, 0x1); +} + +static int fsl_pcie_setup_outbound_win(struct fsl_pcie *pcie, int idx, + int type, u64 phys, u64 bus_addr, + pci_size_t size) +{ + ccsr_fsl_pci_t *regs = pcie->regs; + pot_t *po = ®s->pot[idx]; + u32 war, sz; + + if (idx < 0) + return -EINVAL; + + out_be32(&po->powbar, phys >> 12); + out_be32(&po->potar, bus_addr >> 12); +#ifdef CONFIG_SYS_PCI_64BIT + out_be32(&po->potear, bus_addr >> 44); +#else + out_be32(&po->potear, 0); +#endif + + sz = (__ilog2_u64((u64)size) - 1); + war = POWAR_EN | sz; + + if (type == PCI_REGION_IO) + war |= POWAR_IO_READ | POWAR_IO_WRITE; + else + war |= POWAR_MEM_READ | POWAR_MEM_WRITE; + + out_be32(&po->powar, war); + + return 0; +} + +static int fsl_pcie_setup_inbound_win(struct fsl_pcie *pcie, int idx, + bool pf, u64 phys, u64 bus_addr, + pci_size_t size) +{ + ccsr_fsl_pci_t *regs = pcie->regs; + pit_t *pi = ®s->pit[idx]; + u32 sz = (__ilog2_u64(size) - 1); + u32 flag = PIWAR_LOCAL; + + if (idx < 0) + return -EINVAL; + + out_be32(&pi->pitar, phys >> 12); + out_be32(&pi->piwbar, bus_addr >> 12); + +#ifdef CONFIG_SYS_PCI_64BIT + out_be32(&pi->piwbear, bus_addr >> 44); +#else + out_be32(&pi->piwbear, 0); +#endif + +#ifdef CONFIG_SYS_FSL_ERRATUM_A005434 + flag = 0; +#endif + + flag |= PIWAR_EN | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; + if (pf) + flag |= PIWAR_PF; + out_be32(&pi->piwar, flag | sz); + + return 0; +} + +static int fsl_pcie_setup_outbound_wins(struct fsl_pcie *pcie) +{ + struct pci_region *io, *mem, *pref; + int idx = 1; /* skip 0 */ + + pci_get_regions(pcie->bus, &io, &mem, &pref); + + if (io) + /* ATU : OUTBOUND : IO */ + 
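/*
 * Worked example of the encoding used by fsl_pcie_setup_outbound_win()
 * above: for a 256 MiB (0x10000000 byte) memory window,
 * __ilog2_u64(size) is 28, so the size field written to POWAR is 27,
 * which the ATMU interprets as a 2^(27 + 1) = 256 MiB window; the
 * window base and target addresses are programmed right-shifted by 12,
 * i.e. in 4 KiB units.
 */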
fsl_pcie_setup_outbound_win(pcie, idx++, + PCI_REGION_IO, + io->phys_start, + io->bus_start, + io->size); + + if (mem) + /* ATU : OUTBOUND : MEM */ + fsl_pcie_setup_outbound_win(pcie, idx++, + PCI_REGION_MEM, + mem->phys_start, + mem->bus_start, + mem->size); + return 0; +} + +static int fsl_pcie_setup_inbound_wins(struct fsl_pcie *pcie) +{ + phys_addr_t phys_start = CFG_SYS_PCI_MEMORY_PHYS; + pci_addr_t bus_start = CFG_SYS_PCI_MEMORY_BUS; + u64 sz = min((u64)gd->ram_size, (1ull << 32)); + pci_size_t pci_sz; + int idx; + + if (pcie->block_rev >= PEX_IP_BLK_REV_2_2) + idx = 2; + else + idx = 3; + + pci_sz = 1ull << __ilog2_u64(sz); + + dev_dbg(pcie->bus, "R0 bus_start: %llx phys_start: %llx size: %llx\n", + (u64)bus_start, (u64)phys_start, (u64)sz); + + /* if we aren't an exact power of two match, pci_sz is smaller + * round it up to the next power of two. We report the actual + * size to pci region tracking. + */ + if (pci_sz != sz) + sz = 2ull << __ilog2_u64(sz); + + fsl_pcie_setup_inbound_win(pcie, idx--, true, + CFG_SYS_PCI_MEMORY_PHYS, + CFG_SYS_PCI_MEMORY_BUS, sz); +#if defined(CONFIG_PHYS_64BIT) && defined(CONFIG_SYS_PCI_64BIT) + /* + * On 64-bit capable systems, set up a mapping for all of DRAM + * in high pci address space. + */ + pci_sz = 1ull << __ilog2_u64(gd->ram_size); + /* round up to the next largest power of two */ + if (gd->ram_size > pci_sz) + pci_sz = 1ull << (__ilog2_u64(gd->ram_size) + 1); + + dev_dbg(pcie->bus, "R64 bus_start: %llx phys_start: %llx size: %llx\n", + (u64)CFG_SYS_PCI64_MEMORY_BUS, + (u64)CFG_SYS_PCI_MEMORY_PHYS, (u64)pci_sz); + + fsl_pcie_setup_inbound_win(pcie, idx--, true, + CFG_SYS_PCI_MEMORY_PHYS, + CFG_SYS_PCI64_MEMORY_BUS, pci_sz); +#endif + + return 0; +} + +static int fsl_pcie_init_atmu(struct fsl_pcie *pcie) +{ + fsl_pcie_setup_outbound_wins(pcie); + fsl_pcie_setup_inbound_wins(pcie); + + return 0; +} + +static void fsl_pcie_dbi_read_only_reg_write_enable(struct fsl_pcie *pcie, + bool enable) +{ + u32 val; + + fsl_pcie_hose_read_config_dword(pcie, DBI_RO_WR_EN, &val); + if (enable) + val |= 1; + else + val &= ~1; + fsl_pcie_hose_write_config_dword(pcie, DBI_RO_WR_EN, val); +} + +static int fsl_pcie_init_port(struct fsl_pcie *pcie) +{ + ccsr_fsl_pci_t *regs = pcie->regs; + u32 val_32; + u16 val_16; + + fsl_pcie_init_atmu(pcie); + +#ifdef CONFIG_FSL_PCIE_DISABLE_ASPM + val_32 = 0; + fsl_pcie_hose_read_config_dword(pcie, PCI_LCR, &val_32); + val_32 &= ~0x03; + fsl_pcie_hose_write_config_dword(pcie, PCI_LCR, val_32); + udelay(1); +#endif + +#ifdef CONFIG_FSL_PCIE_RESET + u16 ltssm; + int i; + + if (pcie->block_rev >= PEX_IP_BLK_REV_3_0) { + /* assert PCIe reset */ + setbits_be32(®s->pdb_stat, 0x08000000); + (void)in_be32(®s->pdb_stat); + udelay(1000); + /* clear PCIe reset */ + clrbits_be32(®s->pdb_stat, 0x08000000); + asm("sync;isync"); + for (i = 0; i < 100 && !fsl_pcie_link_up(pcie); i++) + udelay(1000); + } else { + fsl_pcie_hose_read_config_word(pcie, PCI_LTSSM, <ssm); + if (ltssm == 1) { + /* assert PCIe reset */ + setbits_be32(®s->pdb_stat, 0x08000000); + (void)in_be32(®s->pdb_stat); + udelay(100); + /* clear PCIe reset */ + clrbits_be32(®s->pdb_stat, 0x08000000); + asm("sync;isync"); + for (i = 0; i < 100 && + !fsl_pcie_link_up(pcie); i++) + udelay(1000); + } + } +#endif + +#ifdef CONFIG_SYS_P4080_ERRATUM_PCIE_A003 + if (!fsl_pcie_link_up(pcie)) { + serdes_corenet_t *srds_regs; + + srds_regs = (void *)CFG_SYS_FSL_CORENET_SERDES_ADDR; + val_32 = in_be32(&srds_regs->srdspccr0); + + if ((val_32 >> 28) == 3) { + int i; + + 
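/*
 * Note on the DRAM window set up by fsl_pcie_setup_inbound_wins()
 * above: the inbound window must be a power of two, so with e.g. 3 GiB
 * of DRAM, sz starts as 0xC0000000, pci_sz = 1ull << __ilog2_u64(sz) is
 * only 2 GiB, and the window is therefore rounded up to
 * 2ull << __ilog2_u64(sz) = 4 GiB even though only the first 3 GiB are
 * backed by memory.
 */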
out_be32(&srds_regs->srdspccr0, 2 << 28); + setbits_be32(®s->pdb_stat, 0x08000000); + in_be32(®s->pdb_stat); + udelay(100); + clrbits_be32(®s->pdb_stat, 0x08000000); + asm("sync;isync"); + for (i = 0; i < 100 && !fsl_pcie_link_up(pcie); i++) + udelay(1000); + } + } +#endif + + /* + * The Read-Only Write Enable bit defaults to 1 instead of 0. + * Set to 0 to protect the read-only registers. + */ +#ifdef CONFIG_SYS_FSL_ERRATUM_A007815 + fsl_pcie_dbi_read_only_reg_write_enable(pcie, false); +#endif + + /* + * Enable All Error Interrupts except + * - Master abort (pci) + * - Master PERR (pci) + * - ICCA (PCIe) + */ + out_be32(®s->peer, ~0x20140); + + /* set URR, FER, NFER (but not CER) */ + fsl_pcie_hose_read_config_dword(pcie, PCI_DCR, &val_32); + val_32 |= 0xf000e; + fsl_pcie_hose_write_config_dword(pcie, PCI_DCR, val_32); + + /* Clear all error indications */ + out_be32(®s->pme_msg_det, 0xffffffff); + out_be32(®s->pme_msg_int_en, 0xffffffff); + out_be32(®s->pedr, 0xffffffff); + + fsl_pcie_hose_read_config_word(pcie, PCI_DSR, &val_16); + if (val_16) + fsl_pcie_hose_write_config_word(pcie, PCI_DSR, 0xffff); + + fsl_pcie_hose_read_config_word(pcie, PCI_SEC_STATUS, &val_16); + if (val_16) + fsl_pcie_hose_write_config_word(pcie, PCI_SEC_STATUS, 0xffff); + + return 0; +} + +static int fsl_pcie_fixup_classcode(struct fsl_pcie *pcie) +{ + u32 classcode_reg; + u32 val; + + if (pcie->block_rev >= PEX_IP_BLK_REV_3_0) { + classcode_reg = PCI_CLASS_REVISION; + fsl_pcie_dbi_read_only_reg_write_enable(pcie, true); + } else { + classcode_reg = CSR_CLASSCODE; + } + + fsl_pcie_hose_read_config_dword(pcie, classcode_reg, &val); + val &= 0xff; + val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; + fsl_pcie_hose_write_config_dword(pcie, classcode_reg, val); + + if (pcie->block_rev >= PEX_IP_BLK_REV_3_0) + fsl_pcie_dbi_read_only_reg_write_enable(pcie, false); + + return 0; +} + +static int fsl_pcie_init_rc(struct fsl_pcie *pcie) +{ + return fsl_pcie_fixup_classcode(pcie); +} + +static int fsl_pcie_init_ep(struct fsl_pcie *pcie) +{ + fsl_pcie_config_ready(pcie); + + return 0; +} + +static int fsl_pcie_probe(struct udevice *dev) +{ + struct fsl_pcie *pcie = dev_get_priv(dev); + ccsr_fsl_pci_t *regs = pcie->regs; + u16 val_16; + + pcie->bus = dev; + pcie->block_rev = in_be32(®s->block_rev1); + + list_add(&pcie->list, &fsl_pcie_list); + pcie->enabled = is_serdes_configured(PCIE1 + pcie->idx); + if (!pcie->enabled) { + printf("PCIe%d: %s disabled\n", pcie->idx, dev->name); + return 0; + } + + fsl_pcie_setup_law(pcie); + + pcie->mode = fsl_pcie_is_agent(pcie); + + fsl_pcie_init_port(pcie); + + printf("PCIe%d: %s ", pcie->idx, dev->name); + + if (pcie->mode) { + printf("Endpoint"); + fsl_pcie_init_ep(pcie); + } else { + printf("Root Complex"); + fsl_pcie_init_rc(pcie); + } + + if (!fsl_pcie_link_up(pcie)) { + printf(": %s\n", pcie->mode ? 
"undetermined link" : "no link"); + return 0; + } + + fsl_pcie_hose_read_config_word(pcie, PCI_LSR, &val_16); + printf(": x%d gen%d\n", (val_16 & 0x3f0) >> 4, (val_16 & 0xf)); + + return 0; +} + +static int fsl_pcie_of_to_plat(struct udevice *dev) +{ + struct fsl_pcie *pcie = dev_get_priv(dev); + struct fsl_pcie_data *info; + int ret; + + pcie->regs = dev_remap_addr(dev); + if (!pcie->regs) { + pr_err("\"reg\" resource not found\n"); + return -EINVAL; + } + + ret = dev_read_u32(dev, "law_trgt_if", &pcie->law_trgt_if); + if (ret < 0) { + pr_err("\"law_trgt_if\" not found\n"); + return ret; + } + + info = (struct fsl_pcie_data *)dev_get_driver_data(dev); + pcie->info = info; + pcie->idx = abs((u32)(dev_read_addr(dev) & info->block_offset_mask) - + info->block_offset) / info->stride; + + return 0; +} + +static const struct dm_pci_ops fsl_pcie_ops = { + .read_config = fsl_pcie_read_config, + .write_config = fsl_pcie_write_config, +}; + +static struct fsl_pcie_data p1_p2_data = { + .block_offset = 0xa000, + .block_offset_mask = 0xffff, + .stride = 0x1000, +}; + +static struct fsl_pcie_data p2041_data = { + .block_offset = 0x200000, + .block_offset_mask = 0x3fffff, + .stride = 0x1000, +}; + +static struct fsl_pcie_data t2080_data = { + .block_offset = 0x240000, + .block_offset_mask = 0x3fffff, + .stride = 0x10000, +}; + +static const struct udevice_id fsl_pcie_ids[] = { + { .compatible = "fsl,mpc8548-pcie", .data = (ulong)&p1_p2_data }, + { .compatible = "fsl,pcie-p1_p2", .data = (ulong)&p1_p2_data }, + { .compatible = "fsl,pcie-p2041", .data = (ulong)&p2041_data }, + { .compatible = "fsl,pcie-p3041", .data = (ulong)&p2041_data }, + { .compatible = "fsl,pcie-p4080", .data = (ulong)&p2041_data }, + { .compatible = "fsl,pcie-p5040", .data = (ulong)&p2041_data }, + { .compatible = "fsl,pcie-t102x", .data = (ulong)&t2080_data }, + { .compatible = "fsl,pcie-t104x", .data = (ulong)&t2080_data }, + { .compatible = "fsl,pcie-t2080", .data = (ulong)&t2080_data }, + { .compatible = "fsl,pcie-t4240", .data = (ulong)&t2080_data }, + { } +}; + +U_BOOT_DRIVER(fsl_pcie) = { + .name = "fsl_pcie", + .id = UCLASS_PCI, + .of_match = fsl_pcie_ids, + .ops = &fsl_pcie_ops, + .of_to_plat = fsl_pcie_of_to_plat, + .probe = fsl_pcie_probe, + .priv_auto = sizeof(struct fsl_pcie), +}; diff --git a/drivers/pci/pcie_fsl.h b/drivers/pci/pcie_fsl.h new file mode 100644 index 00000000000..ba84a232b83 --- /dev/null +++ b/drivers/pci/pcie_fsl.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2019 NXP + * + * PCIe DM U-Boot driver for Freescale PowerPC SoCs + * Author: Hou Zhiqiang <Zhiqiang.Hou@nxp.com> + */ + +#ifndef _PCIE_FSL_H_ +#define _PCIE_FSL_H_ + +/* GPEX CSR */ +#define CSR_CLASSCODE 0x474 + +#ifdef CONFIG_SYS_FSL_PCI_VER_3_X +#define FSL_PCIE_CAP_ID 0x70 +#else +#define FSL_PCIE_CAP_ID 0x4c +#endif +/* PCIe Device Control Register */ +#define PCI_DCR (FSL_PCIE_CAP_ID + 0x08) +/* PCIe Device Status Register */ +#define PCI_DSR (FSL_PCIE_CAP_ID + 0x0a) +/* PCIe Link Control Register */ +#define PCI_LCR (FSL_PCIE_CAP_ID + 0x10) +/* PCIe Link Status Register */ +#define PCI_LSR (FSL_PCIE_CAP_ID + 0x12) + +#define DBI_RO_WR_EN 0x8bc + +#ifndef CFG_SYS_PCI_MEMORY_BUS +#define CFG_SYS_PCI_MEMORY_BUS 0 +#endif + +#ifndef CFG_SYS_PCI_MEMORY_PHYS +#define CFG_SYS_PCI_MEMORY_PHYS 0 +#endif + +#if defined(CONFIG_SYS_PCI_64BIT) && !defined(CFG_SYS_PCI64_MEMORY_BUS) +#define CFG_SYS_PCI64_MEMORY_BUS (64ull * 1024 * 1024 * 1024) +#endif + +#define PEX_CSR0_LTSSM_MASK 0xFC +#define PEX_CSR0_LTSSM_SHIFT 2 
+#define LTSSM_L0_REV3 0x11 +#define LTSSM_L0 0x16 + +struct fsl_pcie_data { + u32 block_offset; /* Offset from CCSR of 1st controller */ + u32 block_offset_mask; /* Mask out the CCSR base */ + u32 stride; /* Offset stride between controllers */ +}; + +struct fsl_pcie { + int idx; + struct udevice *bus; + void __iomem *regs; + u32 law_trgt_if; /* LAW target ID */ + u32 block_rev; /* IP block revision */ + bool mode; /* RC&EP mode flag */ + bool enabled; /* Enable status */ + struct list_head list; + struct fsl_pcie_data *info; +}; + +extern struct list_head fsl_pcie_list; + +#endif /* _PCIE_FSL_H_ */ diff --git a/drivers/pci/pcie_fsl_fixup.c b/drivers/pci/pcie_fsl_fixup.c new file mode 100644 index 00000000000..9187e7af746 --- /dev/null +++ b/drivers/pci/pcie_fsl_fixup.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0+ OR X11 +/* + * Copyright 2019 NXP + * + * PCIe Kernel DT fixup of DM U-Boot driver for Freescale PowerPC SoCs + * Author: Hou Zhiqiang <Zhiqiang.Hou@nxp.com> + */ + +#ifdef CONFIG_OF_BOARD_SETUP +#include <dm.h> +#include <fdt_support.h> +#include <asm/fsl_pci.h> +#include <linux/libfdt.h> +#include "pcie_fsl.h" + +static void ft_fsl_pcie_setup(void *blob, struct fsl_pcie *pcie) +{ + struct pci_controller *hose = dev_get_uclass_priv(pcie->bus); + fdt_addr_t regs_addr; + int off; + + regs_addr = dev_read_addr(pcie->bus); + off = fdt_node_offset_by_compat_reg(blob, FSL_PCIE_COMPAT, regs_addr); + if (off < 0) { + printf("%s: Fail to find PCIe node@0x%pa\n", + FSL_PCIE_COMPAT, ®s_addr); + return; + } + + if (!hose || !pcie->enabled) + fdt_del_node(blob, off); + else + fdt_pci_dma_ranges(blob, off, hose); +} + +/* Fixup Kernel DT for PCIe */ +void pci_of_setup(void *blob, struct bd_info *bd) +{ + struct fsl_pcie *pcie; + + list_for_each_entry(pcie, &fsl_pcie_list, list) + ft_fsl_pcie_setup(blob, pcie); +} + +#else +void pci_of_setup(void *blob, struct bd_info *bd) +{ +} +#endif diff --git a/drivers/pci/pcie_imx.c b/drivers/pci/pcie_imx.c new file mode 100644 index 00000000000..11c4ccbfc55 --- /dev/null +++ b/drivers/pci/pcie_imx.c @@ -0,0 +1,790 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Freescale i.MX6 PCI Express Root-Complex driver + * + * Copyright (C) 2013 Marek Vasut <marex@denx.de> + * + * Based on upstream Linux kernel driver: + * pci-imx6.c: Sean Cross <xobs@kosagi.com> + * pcie-designware.c: Jingoo Han <jg1.han@samsung.com> + * + * This is a legacy PCIe iMX driver kept to support older iMX6 SoCs. It is + * rather tied to quite old port of pcie-designware driver from Linux which + * suffices only iMX6 specific needs. But now we have modern PCIe iMX driver + * (drivers/pci/pcie_dw_imx.c) utilizing all the common DWC specific bits from + * (drivers/pci/pcie_dw_common.*). So you are encouraged to add any further iMX + * SoC support there or even better if you posses older iMX6 SoCs then switch + * those too in order to have a single modern PCIe iMX driver. 
+ */ + +#include <init.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <power/regulator.h> +#include <asm/arch/clock.h> +#include <asm/arch/iomux.h> +#include <asm/arch/crm_regs.h> +#include <asm/gpio.h> +#include <asm/io.h> +#include <dm.h> +#include <linux/delay.h> +#include <linux/sizes.h> +#include <errno.h> +#include <asm/arch/sys_proto.h> + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +#ifdef CONFIG_MX6SX +#define MX6_DBI_ADDR 0x08ffc000 +#define MX6_IO_ADDR 0x08000000 +#define MX6_MEM_ADDR 0x08100000 +#define MX6_ROOT_ADDR 0x08f00000 +#else +#define MX6_DBI_ADDR 0x01ffc000 +#define MX6_IO_ADDR 0x01000000 +#define MX6_MEM_ADDR 0x01100000 +#define MX6_ROOT_ADDR 0x01f00000 +#endif +#define MX6_DBI_SIZE 0x4000 +#define MX6_IO_SIZE 0x100000 +#define MX6_MEM_SIZE 0xe00000 +#define MX6_ROOT_SIZE 0xfc000 + +/* PCIe Port Logic registers (memory-mapped) */ +#define PL_OFFSET 0x700 +#define PCIE_PL_PFLR (PL_OFFSET + 0x08) +#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) +#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) +#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) +#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) +#define PCIE_PHY_DEBUG_R1_LINK_UP (1 << 4) +#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (1 << 29) + +#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) +#define PCIE_PHY_CTRL_DATA_LOC 0 +#define PCIE_PHY_CTRL_CAP_ADR_LOC 16 +#define PCIE_PHY_CTRL_CAP_DAT_LOC 17 +#define PCIE_PHY_CTRL_WR_LOC 18 +#define PCIE_PHY_CTRL_RD_LOC 19 + +#define PCIE_PHY_STAT (PL_OFFSET + 0x110) +#define PCIE_PHY_STAT_DATA_LOC 0 +#define PCIE_PHY_STAT_ACK_LOC 16 + +/* PHY registers (not memory-mapped) */ +#define PCIE_PHY_RX_ASIC_OUT 0x100D + +#define PHY_RX_OVRD_IN_LO 0x1005 +#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) +#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) + +#define PCIE_PHY_PUP_REQ (1 << 7) + +/* iATU registers */ +#define PCIE_ATU_VIEWPORT 0x900 +#define PCIE_ATU_REGION_INBOUND (0x1 << 31) +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_TYPE_MEM (0x0 << 0) +#define PCIE_ATU_TYPE_IO (0x2 << 0) +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_ENABLE (0x1 << 31) +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_LOWER_BASE 0x90C +#define PCIE_ATU_UPPER_BASE 0x910 +#define PCIE_ATU_LIMIT 0x914 +#define PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) +#define PCIE_ATU_UPPER_TARGET 0x91C + +struct imx_pcie_priv { + void __iomem *dbi_base; + void __iomem *cfg_base; + struct gpio_desc reset_gpio; + bool reset_active_high; + struct udevice *vpcie; +}; + +/* + * PHY access functions + */ +static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) +{ + u32 val; + u32 max_iterations = 10; + u32 wait_counter = 0; + + do { + val = readl(dbi_base + PCIE_PHY_STAT); + val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; + wait_counter++; + + if (val == exp_val) + return 0; + + udelay(1); + } while (wait_counter < max_iterations); + + return -ETIMEDOUT; +} + +static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) +{ + u32 val; + int ret; + + val = addr << PCIE_PHY_CTRL_DATA_LOC; + writel(val, dbi_base + PCIE_PHY_CTRL); + + val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); + writel(val, dbi_base + PCIE_PHY_CTRL); + + ret = pcie_phy_poll_ack(dbi_base, 1); + if (ret) + 
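/*
 * The indirect PHY access protocol used here and in pcie_phy_read()/
 * pcie_phy_write() below: the 16-bit PHY register address is placed in
 * PCIE_PHY_CTRL[15:0] and latched by pulsing the CAP_ADR bit, with each
 * step confirmed by polling the ack bit in PCIE_PHY_STAT; a read then
 * asserts the RD bit and fetches the result from PCIE_PHY_STAT[15:0],
 * while a write latches the data via CAP_DAT and pulses the WR bit.
 */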
return ret; + + val = addr << PCIE_PHY_CTRL_DATA_LOC; + writel(val, dbi_base + PCIE_PHY_CTRL); + + ret = pcie_phy_poll_ack(dbi_base, 0); + if (ret) + return ret; + + return 0; +} + +/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ +static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) +{ + u32 val, phy_ctl; + int ret; + + ret = pcie_phy_wait_ack(dbi_base, addr); + if (ret) + return ret; + + /* assert Read signal */ + phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; + writel(phy_ctl, dbi_base + PCIE_PHY_CTRL); + + ret = pcie_phy_poll_ack(dbi_base, 1); + if (ret) + return ret; + + val = readl(dbi_base + PCIE_PHY_STAT); + *data = val & 0xffff; + + /* deassert Read signal */ + writel(0x00, dbi_base + PCIE_PHY_CTRL); + + ret = pcie_phy_poll_ack(dbi_base, 0); + if (ret) + return ret; + + return 0; +} + +static int pcie_phy_write(void __iomem *dbi_base, int addr, int data) +{ + u32 var; + int ret; + + /* write addr */ + /* cap addr */ + ret = pcie_phy_wait_ack(dbi_base, addr); + if (ret) + return ret; + + var = data << PCIE_PHY_CTRL_DATA_LOC; + writel(var, dbi_base + PCIE_PHY_CTRL); + + /* capture data */ + var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); + writel(var, dbi_base + PCIE_PHY_CTRL); + + ret = pcie_phy_poll_ack(dbi_base, 1); + if (ret) + return ret; + + /* deassert cap data */ + var = data << PCIE_PHY_CTRL_DATA_LOC; + writel(var, dbi_base + PCIE_PHY_CTRL); + + /* wait for ack de-assertion */ + ret = pcie_phy_poll_ack(dbi_base, 0); + if (ret) + return ret; + + /* assert wr signal */ + var = 0x1 << PCIE_PHY_CTRL_WR_LOC; + writel(var, dbi_base + PCIE_PHY_CTRL); + + /* wait for ack */ + ret = pcie_phy_poll_ack(dbi_base, 1); + if (ret) + return ret; + + /* deassert wr signal */ + var = data << PCIE_PHY_CTRL_DATA_LOC; + writel(var, dbi_base + PCIE_PHY_CTRL); + + /* wait for ack de-assertion */ + ret = pcie_phy_poll_ack(dbi_base, 0); + if (ret) + return ret; + + writel(0x0, dbi_base + PCIE_PHY_CTRL); + + return 0; +} + +static int imx6_pcie_link_up(struct imx_pcie_priv *priv) +{ + u32 rc, ltssm; + int rx_valid, temp; + + /* link is debug bit 36, debug register 1 starts at bit 32 */ + rc = readl(priv->dbi_base + PCIE_PHY_DEBUG_R1); + if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) && + !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)) + return -EAGAIN; + + /* + * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. + * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2). + * If (MAC/LTSSM.state == Recovery.RcvrLock) + * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition + * to gen2 is stuck + */ + pcie_phy_read(priv->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid); + ltssm = readl(priv->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F; + + if (rx_valid & 0x01) + return 0; + + if (ltssm != 0x0d) + return 0; + + printf("transition to gen2 is stuck, reset PHY!\n"); + + pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp); + temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp); + + udelay(3000); + + pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp); + temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp); + + return 0; +} + +/* + * iATU region setup + */ +static int imx_pcie_regions_setup(struct imx_pcie_priv *priv) +{ + /* + * i.MX6 defines 16MB in the AXI address map for PCIe. 
+ * + * That address space excepted the pcie registers is + * split and defined into different regions by iATU, + * with sizes and offsets as follows: + * + * 0x0100_0000 --- 0x010F_FFFF 1MB IORESOURCE_IO + * 0x0110_0000 --- 0x01EF_FFFF 14MB IORESOURCE_MEM + * 0x01F0_0000 --- 0x01FF_FFFF 1MB Cfg + Registers + */ + + /* CMD reg:I/O space, MEM space, and Bus Master Enable */ + setbits_le32(priv->dbi_base + PCI_COMMAND, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + + /* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI_NORMAL */ + setbits_le32(priv->dbi_base + PCI_CLASS_REVISION, + PCI_CLASS_BRIDGE_PCI_NORMAL << 8); + + /* Region #0 is used for Outbound CFG space access. */ + writel(0, priv->dbi_base + PCIE_ATU_VIEWPORT); + + writel(lower_32_bits((uintptr_t)priv->cfg_base), + priv->dbi_base + PCIE_ATU_LOWER_BASE); + writel(upper_32_bits((uintptr_t)priv->cfg_base), + priv->dbi_base + PCIE_ATU_UPPER_BASE); + writel(lower_32_bits((uintptr_t)priv->cfg_base + MX6_ROOT_SIZE), + priv->dbi_base + PCIE_ATU_LIMIT); + + writel(0, priv->dbi_base + PCIE_ATU_LOWER_TARGET); + writel(0, priv->dbi_base + PCIE_ATU_UPPER_TARGET); + writel(PCIE_ATU_TYPE_CFG0, priv->dbi_base + PCIE_ATU_CR1); + writel(PCIE_ATU_ENABLE, priv->dbi_base + PCIE_ATU_CR2); + + return 0; +} + +/* + * PCI Express accessors + */ +static void __iomem *get_bus_address(struct imx_pcie_priv *priv, + pci_dev_t d, int where) +{ + void __iomem *va_address; + + /* Reconfigure Region #0 */ + writel(0, priv->dbi_base + PCIE_ATU_VIEWPORT); + + if (PCI_BUS(d) < 2) + writel(PCIE_ATU_TYPE_CFG0, priv->dbi_base + PCIE_ATU_CR1); + else + writel(PCIE_ATU_TYPE_CFG1, priv->dbi_base + PCIE_ATU_CR1); + + if (PCI_BUS(d) == 0) { + va_address = priv->dbi_base; + } else { + writel(d << 8, priv->dbi_base + PCIE_ATU_LOWER_TARGET); + va_address = priv->cfg_base; + } + + va_address += (where & ~0x3); + + return va_address; +} + +static int imx_pcie_addr_valid(pci_dev_t d) +{ + if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 1)) + return -EINVAL; + if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0)) + return -EINVAL; + return 0; +} + +/* + * Replace the original ARM DABT handler with a simple jump-back one. + * + * The problem here is that if we have a PCIe bridge attached to this PCIe + * controller, but no PCIe device is connected to the bridges' downstream + * port, the attempt to read/write from/to the config space will produce + * a DABT. This is a behavior of the controller and can not be disabled + * unfortuatelly. + * + * To work around the problem, we backup the current DABT handler address + * and replace it with our own DABT handler, which only bounces right back + * into the code. + */ +static void imx_pcie_fix_dabt_handler(bool set) +{ + extern uint32_t *_data_abort; + uint32_t *data_abort_addr = (uint32_t *)&_data_abort; + + static const uint32_t data_abort_bounce_handler = 0xe25ef004; + uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler; + + static uint32_t data_abort_backup; + + if (set) { + data_abort_backup = *data_abort_addr; + *data_abort_addr = data_abort_bounce_addr; + } else { + *data_abort_addr = data_abort_backup; + } +} + +static int imx_pcie_read_cfg(struct imx_pcie_priv *priv, pci_dev_t d, + int where, u32 *val) +{ + void __iomem *va_address; + int ret; + + ret = imx_pcie_addr_valid(d); + if (ret) { + *val = 0xffffffff; + return 0; + } + + va_address = get_bus_address(priv, d, where); + + /* + * Read the PCIe config space. 
We must replace the DABT handler + * here in case we got data abort from the PCIe controller, see + * imx_pcie_fix_dabt_handler() description. Note that writing the + * "val" with valid value is also imperative here as in case we + * did got DABT, the val would contain random value. + */ + imx_pcie_fix_dabt_handler(true); + writel(0xffffffff, val); + *val = readl(va_address); + imx_pcie_fix_dabt_handler(false); + + return 0; +} + +static int imx_pcie_write_cfg(struct imx_pcie_priv *priv, pci_dev_t d, + int where, u32 val) +{ + void __iomem *va_address = NULL; + int ret; + + ret = imx_pcie_addr_valid(d); + if (ret) + return ret; + + va_address = get_bus_address(priv, d, where); + + /* + * Write the PCIe config space. We must replace the DABT handler + * here in case we got data abort from the PCIe controller, see + * imx_pcie_fix_dabt_handler() description. + */ + imx_pcie_fix_dabt_handler(true); + writel(val, va_address); + imx_pcie_fix_dabt_handler(false); + + return 0; +} + +/* + * Initial bus setup + */ +static int imx6_pcie_assert_core_reset(struct imx_pcie_priv *priv, + bool prepare_for_boot) +{ + struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; + + if (is_mx6dqp()) + setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST); + +#if defined(CONFIG_MX6SX) + struct gpc *gpc_regs = (struct gpc *)GPC_BASE_ADDR; + + /* SSP_EN is not used on MX6SX anymore */ + setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN); + /* Force PCIe PHY reset */ + setbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST); + /* Power up PCIe PHY */ + setbits_le32(&gpc_regs->cntr, PCIE_PHY_PUP_REQ); +#else + /* + * If the bootloader already enabled the link we need some special + * handling to get the core back into a state where it is safe to + * touch it for configuration. As there is no dedicated reset signal + * wired up for MX6QDL, we need to manually force LTSSM into "detect" + * state before completely disabling LTSSM, which is a prerequisite + * for core configuration. + * + * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong + * indication that the bootloader activated the link. 
+ */ + if ((is_mx6dq() || is_mx6sdl()) && prepare_for_boot) { + u32 val, gpr1, gpr12; + + gpr1 = readl(&iomuxc_regs->gpr[1]); + gpr12 = readl(&iomuxc_regs->gpr[12]); + if ((gpr1 & IOMUXC_GPR1_PCIE_REF_CLK_EN) && + (gpr12 & IOMUXC_GPR12_PCIE_CTL_2)) { + val = readl(priv->dbi_base + PCIE_PL_PFLR); + val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; + val |= PCIE_PL_PFLR_FORCE_LINK; + + imx_pcie_fix_dabt_handler(true); + writel(val, priv->dbi_base + PCIE_PL_PFLR); + imx_pcie_fix_dabt_handler(false); + + gpr12 &= ~IOMUXC_GPR12_PCIE_CTL_2; + writel(val, &iomuxc_regs->gpr[12]); + } + } + setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN); + clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN); +#endif + + return 0; +} + +static int imx6_pcie_init_phy(void) +{ + struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; + + clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE); + + clrsetbits_le32(&iomuxc_regs->gpr[12], + IOMUXC_GPR12_DEVICE_TYPE_MASK, + IOMUXC_GPR12_DEVICE_TYPE_RC); + clrsetbits_le32(&iomuxc_regs->gpr[12], + IOMUXC_GPR12_LOS_LEVEL_MASK, + IOMUXC_GPR12_LOS_LEVEL_9); + +#ifdef CONFIG_MX6SX + clrsetbits_le32(&iomuxc_regs->gpr[12], + IOMUXC_GPR12_RX_EQ_MASK, + IOMUXC_GPR12_RX_EQ_2); +#endif + + writel((0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) | + (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) | + (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) | + (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) | + (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET), + &iomuxc_regs->gpr[8]); + + return 0; +} + +int imx6_pcie_toggle_power(struct udevice *vpcie) +{ +#ifdef CFG_PCIE_IMX_POWER_GPIO + gpio_request(CFG_PCIE_IMX_POWER_GPIO, "pcie_power"); + gpio_direction_output(CFG_PCIE_IMX_POWER_GPIO, 0); + mdelay(20); + gpio_set_value(CFG_PCIE_IMX_POWER_GPIO, 1); + mdelay(20); + gpio_free(CFG_PCIE_IMX_POWER_GPIO); +#endif + +#if CONFIG_IS_ENABLED(DM_REGULATOR) + if (vpcie) { + regulator_set_enable(vpcie, false); + mdelay(20); + regulator_set_enable(vpcie, true); + mdelay(20); + } +#endif + return 0; +} + +int imx6_pcie_toggle_reset(struct gpio_desc *gpio, bool active_high) +{ + /* + * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1' + * for detailed understanding of the PCIe CR reset logic. + * + * The PCIe #PERST reset line _MUST_ be connected, otherwise your + * design does not conform to the specification. You must wait at + * least 20 ms after de-asserting the #PERST so the EP device can + * do self-initialisation. + * + * In case your #PERST pin is connected to a plain GPIO pin of the + * CPU, you can define CFG_PCIE_IMX_PERST_GPIO in your board's + * configuration file and the condition below will handle the rest + * of the reset toggling. + * + * In case your #PERST line of the PCIe EP device is not connected + * at all, your design is broken and you should fix your design, + * otherwise you will observe problems like for example the link + * not coming up after rebooting the system back from running Linux + * that uses the PCIe as well OR the PCIe link might not come up in + * Linux at all in the first place since it's in some non-reset + * state due to being previously used in U-Boot. 
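 *
 * On boards that describe #PERST in the device tree instead, the
 * "reset-gpio" property (plus the optional "reset-gpio-active-high"
 * flag) is requested in imx_pcie_dm_probe() and handed to this function
 * as a gpio_desc, which the dm_gpio_set_value() branch below toggles
 * with the same 20 ms delays.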
+ */ +#ifdef CFG_PCIE_IMX_PERST_GPIO + gpio_request(CFG_PCIE_IMX_PERST_GPIO, "pcie_reset"); + gpio_direction_output(CFG_PCIE_IMX_PERST_GPIO, 0); + mdelay(20); + gpio_set_value(CFG_PCIE_IMX_PERST_GPIO, 1); + mdelay(20); + gpio_free(CFG_PCIE_IMX_PERST_GPIO); +#else + if (dm_gpio_is_valid(gpio)) { + /* Assert PERST# for 20ms then de-assert */ + dm_gpio_set_value(gpio, active_high ? 0 : 1); + mdelay(20); + dm_gpio_set_value(gpio, active_high ? 1 : 0); + mdelay(20); + } else { + puts("WARNING: Make sure the PCIe #PERST line is connected!\n"); + } +#endif + return 0; +} + +static int imx6_pcie_deassert_core_reset(struct imx_pcie_priv *priv) +{ + struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; + + imx6_pcie_toggle_power(priv->vpcie); + + enable_pcie_clock(); + + if (is_mx6dqp()) + clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST); + + /* + * Wait for the clock to settle a bit, when the clock are sourced + * from the CPU, we need about 30 ms to settle. + */ + mdelay(50); + +#if defined(CONFIG_MX6SX) + /* SSP_EN is not used on MX6SX anymore */ + clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN); + /* Clear PCIe PHY reset bit */ + clrbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST); +#else + /* Enable PCIe */ + clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN); + setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN); +#endif + + imx6_pcie_toggle_reset(&priv->reset_gpio, priv->reset_active_high); + + return 0; +} + +static int imx_pcie_link_up(struct imx_pcie_priv *priv) +{ + struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; + uint32_t tmp; + int count = 0; + + imx6_pcie_assert_core_reset(priv, false); + imx6_pcie_init_phy(); + imx6_pcie_deassert_core_reset(priv); + + imx_pcie_regions_setup(priv); + + /* + * By default, the subordinate is set equally to the secondary + * bus (0x01) when the RC boots. + * This means that theoretically, only bus 1 is reachable from the RC. + * Force the PCIe RC subordinate to 0xff, otherwise no downstream + * devices will be detected if the enumeration is applied strictly. + */ + tmp = readl(priv->dbi_base + 0x18); + tmp |= (0xff << 16); + writel(tmp, priv->dbi_base + 0x18); + + /* + * FIXME: Force the PCIe RC to Gen1 operation + * The RC must be forced into Gen1 mode before bringing the link + * up, otherwise no downstream devices are detected. After the + * link is up, a managed Gen1->Gen2 transition can be initiated. + */ + tmp = readl(priv->dbi_base + 0x7c); + tmp &= ~0xf; + tmp |= 0x1; + writel(tmp, priv->dbi_base + 0x7c); + + /* LTSSM enable, starting link. 
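 * Link training is then given roughly 40 ms to complete: the loop below
 * polls imx6_pcie_link_up() every 10 us for up to 4000 iterations, and
 * that helper reports the link as up once PCIE_PHY_DEBUG_R1 shows
 * LINK_UP set with LINK_IN_TRAINING clear.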
*/ + setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE); + + while (!imx6_pcie_link_up(priv)) { + udelay(10); + count++; + if (count >= 4000) { +#ifdef CONFIG_PCI_SCAN_SHOW + puts("PCI: pcie phy link never came up\n"); +#endif + debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + readl(priv->dbi_base + PCIE_PHY_DEBUG_R0), + readl(priv->dbi_base + PCIE_PHY_DEBUG_R1)); + return -EINVAL; + } + } + + return 0; +} + +static int imx_pcie_dm_read_config(const struct udevice *dev, pci_dev_t bdf, + uint offset, ulong *value, + enum pci_size_t size) +{ + struct imx_pcie_priv *priv = dev_get_priv(dev); + u32 tmpval; + int ret; + + ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); + if (ret) + return ret; + + *value = pci_conv_32_to_size(tmpval, offset, size); + return 0; +} + +static int imx_pcie_dm_write_config(struct udevice *dev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct imx_pcie_priv *priv = dev_get_priv(dev); + u32 tmpval, newval; + int ret; + + ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); + if (ret) + return ret; + + newval = pci_conv_size_to_32(tmpval, value, offset, size); + return imx_pcie_write_cfg(priv, bdf, offset, newval); +} + +static int imx_pcie_dm_probe(struct udevice *dev) +{ + struct imx_pcie_priv *priv = dev_get_priv(dev); + +#if CONFIG_IS_ENABLED(DM_REGULATOR) + device_get_supply_regulator(dev, "vpcie-supply", &priv->vpcie); +#endif + + /* if PERST# valid from dt then assert it */ + gpio_request_by_name(dev, "reset-gpio", 0, &priv->reset_gpio, + GPIOD_IS_OUT); + priv->reset_active_high = dev_read_bool(dev, "reset-gpio-active-high"); + if (dm_gpio_is_valid(&priv->reset_gpio)) { + dm_gpio_set_value(&priv->reset_gpio, + priv->reset_active_high ? 0 : 1); + } + + return imx_pcie_link_up(priv); +} + +static int imx_pcie_dm_remove(struct udevice *dev) +{ + struct imx_pcie_priv *priv = dev_get_priv(dev); + + imx6_pcie_assert_core_reset(priv, true); + + return 0; +} + +static int imx_pcie_of_to_plat(struct udevice *dev) +{ + struct imx_pcie_priv *priv = dev_get_priv(dev); + + priv->dbi_base = devfdt_get_addr_index_ptr(dev, 0); + priv->cfg_base = devfdt_get_addr_index_ptr(dev, 1); + if (!priv->dbi_base || !priv->cfg_base) + return -EINVAL; + + return 0; +} + +static const struct dm_pci_ops imx_pcie_ops = { + .read_config = imx_pcie_dm_read_config, + .write_config = imx_pcie_dm_write_config, +}; + +static const struct udevice_id imx_pcie_ids[] = { + { .compatible = "fsl,imx6q-pcie" }, + { .compatible = "fsl,imx6sx-pcie" }, + { } +}; + +U_BOOT_DRIVER(imx_pcie) = { + .name = "imx_pcie", + .id = UCLASS_PCI, + .of_match = imx_pcie_ids, + .ops = &imx_pcie_ops, + .probe = imx_pcie_dm_probe, + .remove = imx_pcie_dm_remove, + .of_to_plat = imx_pcie_of_to_plat, + .priv_auto = sizeof(struct imx_pcie_priv), + .flags = DM_FLAG_OS_PREPARE, +}; diff --git a/drivers/pci/pcie_intel_fpga.c b/drivers/pci/pcie_intel_fpga.c new file mode 100644 index 00000000000..959fd369086 --- /dev/null +++ b/drivers/pci/pcie_intel_fpga.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel FPGA PCIe host controller driver + * + * Copyright (C) 2013-2018 Intel Corporation. 
All rights reserved + * + */ + +#include <dm.h> +#include <pci.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <linux/bitops.h> +#include <linux/delay.h> + +#define RP_TX_REG0 0x2000 +#define RP_TX_CNTRL 0x2004 +#define RP_TX_SOP BIT(0) +#define RP_TX_EOP BIT(1) +#define RP_RXCPL_STATUS 0x200C +#define RP_RXCPL_SOP BIT(0) +#define RP_RXCPL_EOP BIT(1) +#define RP_RXCPL_REG 0x2008 +#define P2A_INT_STATUS 0x3060 +#define P2A_INT_STS_ALL 0xf +#define P2A_INT_ENABLE 0x3070 +#define RP_CAP_OFFSET 0x70 + +/* TLP configuration type 0 and 1 */ +#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ +#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ +#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ +#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ +#define TLP_PAYLOAD_SIZE 0x01 +#define TLP_READ_TAG 0x1d +#define TLP_WRITE_TAG 0x10 +#define RP_DEVFN 0 + +#define RP_CFG_ADDR(pcie, reg) \ + ((pcie->hip_base) + (reg) + (1 << 20)) +#define RP_SECONDARY(pcie) \ + readb(RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS)) +#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) + +#define TLP_CFGRD_DW0(pcie, bus) \ + ((((bus > RP_SECONDARY(pcie)) ? TLP_FMTTYPE_CFGRD1 \ + : TLP_FMTTYPE_CFGRD0) << 24) | \ + TLP_PAYLOAD_SIZE) + +#define TLP_CFGWR_DW0(pcie, bus) \ + ((((bus > RP_SECONDARY(pcie)) ? TLP_FMTTYPE_CFGWR1 \ + : TLP_FMTTYPE_CFGWR0) << 24) | \ + TLP_PAYLOAD_SIZE) + +#define TLP_CFG_DW1(pcie, tag, be) \ + (((TLP_REQ_ID(pcie->first_busno, RP_DEVFN)) << 16) | (tag << 8) | (be)) +#define TLP_CFG_DW2(bus, dev, fn, offset) \ + (((bus) << 24) | ((dev) << 19) | ((fn) << 16) | (offset)) + +#define TLP_COMP_STATUS(s) (((s) >> 13) & 7) +#define TLP_BYTE_COUNT(s) (((s) >> 0) & 0xfff) +#define TLP_HDR_SIZE 3 +#define TLP_LOOP 20000 +#define DWORD_MASK 3 + +#define IS_ROOT_PORT(pcie, bdf) \ + ((PCI_BUS(bdf) == pcie->first_busno) ? true : false) + +/** + * struct intel_fpga_pcie - Intel FPGA PCIe controller state + * @bus: Pointer to the PCI bus + * @cra_base: The base address of CRA register space + * @hip_base: The base address of Rootport configuration space + * @first_busno: This driver supports multiple PCIe controllers. + * first_busno stores the bus number of the PCIe root-port + * number which may vary depending on the PCIe setup. + */ +struct intel_fpga_pcie { + struct udevice *bus; + void __iomem *cra_base; + void __iomem *hip_base; + int first_busno; +}; + +/** + * Intel FPGA PCIe port uses BAR0 of RC's configuration space as the + * translation from PCI bus to native BUS. Entire DDR region is mapped + * into PCIe space using these registers, so it can be reached by DMA from + * EP devices. + * The BAR0 of bridge should be hidden during enumeration to avoid the + * sizing and resource allocation by PCIe core. 
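 * Reads of that BAR are therefore answered with all ones and writes to
 * it are silently discarded by the config-space accessors further down
 * (see the intel_fpga_pcie_hide_rc_bar() checks in
 * pcie_intel_fpga_read_config() and pcie_intel_fpga_write_config()).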
+ */ +static bool intel_fpga_pcie_hide_rc_bar(struct intel_fpga_pcie *pcie, + pci_dev_t bdf, int offset) +{ + if (IS_ROOT_PORT(pcie, bdf) && PCI_DEV(bdf) == 0 && + PCI_FUNC(bdf) == 0 && offset == PCI_BASE_ADDRESS_0) + return true; + + return false; +} + +static inline void cra_writel(struct intel_fpga_pcie *pcie, const u32 value, + const u32 reg) +{ + writel(value, pcie->cra_base + reg); +} + +static inline u32 cra_readl(struct intel_fpga_pcie *pcie, const u32 reg) +{ + return readl(pcie->cra_base + reg); +} + +static bool intel_fpga_pcie_link_up(struct intel_fpga_pcie *pcie) +{ + return !!(readw(RP_CFG_ADDR(pcie, RP_CAP_OFFSET + PCI_EXP_LNKSTA)) + & PCI_EXP_LNKSTA_DLLLA); +} + +static bool intel_fpga_pcie_addr_valid(struct intel_fpga_pcie *pcie, + pci_dev_t bdf) +{ + /* If there is no link, then there is no device */ + if (!IS_ROOT_PORT(pcie, bdf) && !intel_fpga_pcie_link_up(pcie)) + return false; + + /* access only one slot on each root port */ + if (IS_ROOT_PORT(pcie, bdf) && PCI_DEV(bdf) > 0) + return false; + + if ((PCI_BUS(bdf) == pcie->first_busno + 1) && PCI_DEV(bdf) > 0) + return false; + + return true; +} + +static void tlp_write_tx(struct intel_fpga_pcie *pcie, u32 reg0, u32 ctrl) +{ + cra_writel(pcie, reg0, RP_TX_REG0); + cra_writel(pcie, ctrl, RP_TX_CNTRL); +} + +static int tlp_read_packet(struct intel_fpga_pcie *pcie, u32 *value) +{ + int i; + u32 ctrl; + u32 comp_status; + u32 dw[4]; + u32 count = 0; + + for (i = 0; i < TLP_LOOP; i++) { + ctrl = cra_readl(pcie, RP_RXCPL_STATUS); + if (!(ctrl & RP_RXCPL_SOP)) + continue; + + /* read first DW */ + dw[count++] = cra_readl(pcie, RP_RXCPL_REG); + + /* Poll for EOP */ + for (i = 0; i < TLP_LOOP; i++) { + ctrl = cra_readl(pcie, RP_RXCPL_STATUS); + dw[count++] = cra_readl(pcie, RP_RXCPL_REG); + if (ctrl & RP_RXCPL_EOP) { + comp_status = TLP_COMP_STATUS(dw[1]); + if (comp_status) { + *value = pci_get_ff(PCI_SIZE_32); + return 0; + } + + if (value && + TLP_BYTE_COUNT(dw[1]) == sizeof(u32) && + count >= 3) + *value = dw[3]; + + return 0; + } + } + + udelay(5); + } + + dev_err(pcie->dev, "read TLP packet timed out\n"); + return -ENODEV; +} + +static void tlp_write_packet(struct intel_fpga_pcie *pcie, u32 *headers, + u32 data) +{ + tlp_write_tx(pcie, headers[0], RP_TX_SOP); + + tlp_write_tx(pcie, headers[1], 0); + + tlp_write_tx(pcie, headers[2], 0); + + tlp_write_tx(pcie, data, RP_TX_EOP); +} + +static int tlp_cfg_dword_read(struct intel_fpga_pcie *pcie, pci_dev_t bdf, + int offset, u8 byte_en, u32 *value) +{ + u32 headers[TLP_HDR_SIZE]; + u8 busno = PCI_BUS(bdf); + + headers[0] = TLP_CFGRD_DW0(pcie, busno); + headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); + headers[2] = TLP_CFG_DW2(busno, PCI_DEV(bdf), PCI_FUNC(bdf), offset); + + tlp_write_packet(pcie, headers, 0); + + return tlp_read_packet(pcie, value); +} + +static int tlp_cfg_dword_write(struct intel_fpga_pcie *pcie, pci_dev_t bdf, + int offset, u8 byte_en, u32 value) +{ + u32 headers[TLP_HDR_SIZE]; + u8 busno = PCI_BUS(bdf); + + headers[0] = TLP_CFGWR_DW0(pcie, busno); + headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); + headers[2] = TLP_CFG_DW2(busno, PCI_DEV(bdf), PCI_FUNC(bdf), offset); + + tlp_write_packet(pcie, headers, value); + + return tlp_read_packet(pcie, NULL); +} + +int intel_fpga_rp_conf_addr(const struct udevice *bus, pci_dev_t bdf, + uint offset, void **paddress) +{ + struct intel_fpga_pcie *pcie = dev_get_priv(bus); + + *paddress = RP_CFG_ADDR(pcie, offset); + + return 0; +} + +static int intel_fpga_pcie_rp_rd_conf(struct udevice *bus, pci_dev_t bdf, 
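/*
 * Worked example of the TLP header built by tlp_cfg_dword_read() above:
 * with first_busno = 0 and the root port's secondary bus set to 1, a
 * dword config read of device 01:00.0 at offset 0 goes out as a Type 0
 * configuration read whose three header dwords are
 *
 *	headers[0] = 0x04000001	(CfgRd0, length 1 dword)
 *	headers[1] = 0x00001d0f	(requester 00:00.0, tag 0x1d, BE 0xf)
 *	headers[2] = 0x01000000	(bus 1, device 0, function 0, offset 0)
 */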
+ uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_generic_mmap_read_config(bus, intel_fpga_rp_conf_addr, + bdf, offset, valuep, size); +} + +static int intel_fpga_pcie_rp_wr_conf(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + int ret; + struct intel_fpga_pcie *pcie = dev_get_priv(bus); + + ret = pci_generic_mmap_write_config(bus, intel_fpga_rp_conf_addr, + bdf, offset, value, size); + if (!ret) { + /* Monitor changes to PCI_PRIMARY_BUS register on root port + * and update local copy of root bus number accordingly. + */ + if (offset == PCI_PRIMARY_BUS) + pcie->first_busno = (u8)(value); + } + + return ret; +} + +static u8 pcie_get_byte_en(uint offset, enum pci_size_t size) +{ + switch (size) { + case PCI_SIZE_8: + return 1 << (offset & 3); + case PCI_SIZE_16: + return 3 << (offset & 3); + default: + return 0xf; + } +} + +static int _pcie_intel_fpga_read_config(struct intel_fpga_pcie *pcie, + pci_dev_t bdf, uint offset, + ulong *valuep, enum pci_size_t size) +{ + int ret; + u32 data; + u8 byte_en; + + /* Uses memory mapped method to read rootport config registers */ + if (IS_ROOT_PORT(pcie, bdf)) + return intel_fpga_pcie_rp_rd_conf(pcie->bus, bdf, + offset, valuep, size); + + byte_en = pcie_get_byte_en(offset, size); + ret = tlp_cfg_dword_read(pcie, bdf, offset & ~DWORD_MASK, + byte_en, &data); + if (ret) + return ret; + + dev_dbg(pcie->dev, "(addr,size,val)=(0x%04x, %d, 0x%08x)\n", + offset, size, data); + *valuep = pci_conv_32_to_size(data, offset, size); + + return 0; +} + +static int _pcie_intel_fpga_write_config(struct intel_fpga_pcie *pcie, + pci_dev_t bdf, uint offset, + ulong value, enum pci_size_t size) +{ + u32 data; + u8 byte_en; + + dev_dbg(pcie->dev, "PCIE CFG write: (b.d.f)=(%02d.%02d.%02d)\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + dev_dbg(pcie->dev, "(addr,size,val)=(0x%04x, %d, 0x%08lx)\n", + offset, size, value); + + /* Uses memory mapped method to read rootport config registers */ + if (IS_ROOT_PORT(pcie, bdf)) + return intel_fpga_pcie_rp_wr_conf(pcie->bus, bdf, offset, + value, size); + + byte_en = pcie_get_byte_en(offset, size); + data = pci_conv_size_to_32(0, value, offset, size); + + return tlp_cfg_dword_write(pcie, bdf, offset & ~DWORD_MASK, + byte_en, data); +} + +static int pcie_intel_fpga_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct intel_fpga_pcie *pcie = dev_get_priv(bus); + + dev_dbg(pcie->dev, "PCIE CFG read: (b.d.f)=(%02d.%02d.%02d)\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + + if (intel_fpga_pcie_hide_rc_bar(pcie, bdf, offset)) { + *valuep = (u32)pci_get_ff(size); + return 0; + } + + if (!intel_fpga_pcie_addr_valid(pcie, bdf)) { + *valuep = (u32)pci_get_ff(size); + return 0; + } + + return _pcie_intel_fpga_read_config(pcie, bdf, offset, valuep, size); +} + +static int pcie_intel_fpga_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct intel_fpga_pcie *pcie = dev_get_priv(bus); + + if (intel_fpga_pcie_hide_rc_bar(pcie, bdf, offset)) + return 0; + + if (!intel_fpga_pcie_addr_valid(pcie, bdf)) + return 0; + + return _pcie_intel_fpga_write_config(pcie, bdf, offset, value, + size); +} + +static int pcie_intel_fpga_probe(struct udevice *dev) +{ + struct intel_fpga_pcie *pcie = dev_get_priv(dev); + + pcie->bus = pci_get_controller(dev); + pcie->first_busno = dev_seq(dev); + + /* clear all interrupts */ + cra_writel(pcie, P2A_INT_STS_ALL, 
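/*
 * Worked example of pcie_get_byte_en() above: a 16-bit access at config
 * offset 0x06 yields byte enables 3 << (0x06 & 3) = 0xc, an 8-bit
 * access at offset 0x01 yields 0x2, and dword accesses use 0xf. The
 * offset handed to the TLP helpers is dword-aligned via
 * offset & ~DWORD_MASK, so the byte enables select the lanes within
 * that dword.
 */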
P2A_INT_STATUS); + /* disable all interrupts */ + cra_writel(pcie, 0, P2A_INT_ENABLE); + + return 0; +} + +static int pcie_intel_fpga_of_to_plat(struct udevice *dev) +{ + struct intel_fpga_pcie *pcie = dev_get_priv(dev); + struct fdt_resource reg_res; + int node = dev_of_offset(dev); + int ret; + + DECLARE_GLOBAL_DATA_PTR; + + ret = fdt_get_named_resource(gd->fdt_blob, node, "reg", "reg-names", + "Cra", ®_res); + if (ret) { + dev_err(dev, "resource \"Cra\" not found\n"); + return ret; + } + + pcie->cra_base = map_physmem(reg_res.start, + fdt_resource_size(®_res), + MAP_NOCACHE); + + ret = fdt_get_named_resource(gd->fdt_blob, node, "reg", "reg-names", + "Hip", ®_res); + if (ret) { + dev_err(dev, "resource \"Hip\" not found\n"); + return ret; + } + + pcie->hip_base = map_physmem(reg_res.start, + fdt_resource_size(®_res), + MAP_NOCACHE); + + return 0; +} + +static const struct dm_pci_ops pcie_intel_fpga_ops = { + .read_config = pcie_intel_fpga_read_config, + .write_config = pcie_intel_fpga_write_config, +}; + +static const struct udevice_id pcie_intel_fpga_ids[] = { + { .compatible = "altr,pcie-root-port-2.0" }, + {}, +}; + +U_BOOT_DRIVER(pcie_intel_fpga) = { + .name = "pcie_intel_fpga", + .id = UCLASS_PCI, + .of_match = pcie_intel_fpga_ids, + .ops = &pcie_intel_fpga_ops, + .of_to_plat = pcie_intel_fpga_of_to_plat, + .probe = pcie_intel_fpga_probe, + .priv_auto = sizeof(struct intel_fpga_pcie), +}; diff --git a/drivers/pci/pcie_iproc.c b/drivers/pci/pcie_iproc.c new file mode 100644 index 00000000000..360ef1b011f --- /dev/null +++ b/drivers/pci/pcie_iproc.c @@ -0,0 +1,1275 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2020-2021 Broadcom + * + */ + +#include <dm.h> +#include <errno.h> +#include <generic-phy.h> +#include <pci.h> +#include <malloc.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <linux/delay.h> +#include <linux/log2.h> + +#define EP_PERST_SOURCE_SELECT_SHIFT 2 +#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) +#define EP_MODE_SURVIVE_PERST_SHIFT 1 +#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) +#define RC_PCIE_RST_OUTPUT_SHIFT 0 +#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) + +#define CFG_IND_ADDR_MASK 0x00001ffc + +#define CFG_ADDR_CFG_ECAM_MASK 0xfffffffc +#define CFG_ADDR_CFG_TYPE_MASK 0x00000003 + +#define IPROC_PCI_PM_CAP 0x48 +#define IPROC_PCI_PM_CAP_MASK 0xffff +#define IPROC_PCI_EXP_CAP 0xac + +#define IPROC_PCIE_REG_INVALID 0xffff + +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +/* CRS Software Visibility capability */ +#define PCI_EXP_RTCAP_CRSVIS 0x0001 + +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ + +#define PCIE_PHYLINKUP_SHIFT 3 +#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) +#define PCIE_DL_ACTIVE_SHIFT 2 +#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) + +/* derive the enum index of the outbound/inbound mapping registers */ +#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) + +/* + * Maximum number of outbound mapping window sizes that can be supported by any + * OARR/OMAP mapping pair + */ +#define MAX_NUM_OB_WINDOW_SIZES 4 + +#define OARR_VALID_SHIFT 0 +#define OARR_VALID BIT(OARR_VALID_SHIFT) +#define OARR_SIZE_CFG_SHIFT 1 + +/* + * Maximum number of inbound mapping region sizes that can be supported by an + * IARR + */ +#define MAX_NUM_IB_REGION_SIZES 9 + +#define IMAP_VALID_SHIFT 0 +#define IMAP_VALID BIT(IMAP_VALID_SHIFT) + +#define APB_ERR_EN_SHIFT 0 
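/*
 * Example of the MAP_REG() helper defined above: since the OARR/OMAP
 * (and IARR/IMAP) entries alternate in the iproc_pcie_reg enum below,
 * MAP_REG(IPROC_PCIE_OARR0, 2) evaluates to IPROC_PCIE_OARR2, so a
 * window pair can be selected by index.
 */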
+#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) + +/** + * iProc PCIe host registers + */ +enum iproc_pcie_reg { + /* clock/reset signal control */ + IPROC_PCIE_CLK_CTRL = 0, + + /* + * To allow MSI to be steered to an external MSI controller (e.g., ARM + * GICv3 ITS) + */ + IPROC_PCIE_MSI_GIC_MODE, + + /* + * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the + * window where the MSI posted writes are written, for the writes to be + * interpreted as MSI writes. + */ + IPROC_PCIE_MSI_BASE_ADDR, + IPROC_PCIE_MSI_WINDOW_SIZE, + + /* + * To hold the address of the register where the MSI writes are + * programed. When ARM GICv3 ITS is used, this should be programmed + * with the address of the GITS_TRANSLATER register. + */ + IPROC_PCIE_MSI_ADDR_LO, + IPROC_PCIE_MSI_ADDR_HI, + + /* enable MSI */ + IPROC_PCIE_MSI_EN_CFG, + + /* allow access to root complex configuration space */ + IPROC_PCIE_CFG_IND_ADDR, + IPROC_PCIE_CFG_IND_DATA, + + /* allow access to device configuration space */ + IPROC_PCIE_CFG_ADDR, + IPROC_PCIE_CFG_DATA, + + /* enable INTx */ + IPROC_PCIE_INTX_EN, + IPROC_PCIE_INTX_CSR, + + /* outbound address mapping */ + IPROC_PCIE_OARR0, + IPROC_PCIE_OMAP0, + IPROC_PCIE_OARR1, + IPROC_PCIE_OMAP1, + IPROC_PCIE_OARR2, + IPROC_PCIE_OMAP2, + IPROC_PCIE_OARR3, + IPROC_PCIE_OMAP3, + + /* inbound address mapping */ + IPROC_PCIE_IARR0, + IPROC_PCIE_IMAP0, + IPROC_PCIE_IARR1, + IPROC_PCIE_IMAP1, + IPROC_PCIE_IARR2, + IPROC_PCIE_IMAP2, + IPROC_PCIE_IARR3, + IPROC_PCIE_IMAP3, + IPROC_PCIE_IARR4, + IPROC_PCIE_IMAP4, + + /* config read status */ + IPROC_PCIE_CFG_RD_STATUS, + + /* link status */ + IPROC_PCIE_LINK_STATUS, + + /* enable APB error for unsupported requests */ + IPROC_PCIE_APB_ERR_EN, + + /* Ordering Mode configuration registers */ + IPROC_PCIE_ORDERING_CFG, + IPROC_PCIE_IMAP0_RO_CONTROL, + IPROC_PCIE_IMAP1_RO_CONTROL, + IPROC_PCIE_IMAP2_RO_CONTROL, + IPROC_PCIE_IMAP3_RO_CONTROL, + IPROC_PCIE_IMAP4_RO_CONTROL, + + /* total number of core registers */ + IPROC_PCIE_MAX_NUM_REG, +}; + +/* iProc PCIe PAXB v2 registers */ +static const u16 iproc_pcie_reg_paxb_v2[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_INTX_CSR] = 0x334, + [IPROC_PCIE_OARR0] = 0xd20, + [IPROC_PCIE_OMAP0] = 0xd40, + [IPROC_PCIE_OARR1] = 0xd28, + [IPROC_PCIE_OMAP1] = 0xd48, + [IPROC_PCIE_OARR2] = 0xd60, + [IPROC_PCIE_OMAP2] = 0xd68, + [IPROC_PCIE_OARR3] = 0xdf0, + [IPROC_PCIE_OMAP3] = 0xdf8, + [IPROC_PCIE_IARR0] = 0xd00, + [IPROC_PCIE_IMAP0] = 0xc00, + [IPROC_PCIE_IARR2] = 0xd10, + [IPROC_PCIE_IMAP2] = 0xcc0, + [IPROC_PCIE_IARR3] = 0xe00, + [IPROC_PCIE_IMAP3] = 0xe08, + [IPROC_PCIE_IARR4] = 0xe68, + [IPROC_PCIE_IMAP4] = 0xe70, + [IPROC_PCIE_CFG_RD_STATUS] = 0xee0, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_APB_ERR_EN] = 0xf40, + [IPROC_PCIE_ORDERING_CFG] = 0x2000, + [IPROC_PCIE_IMAP0_RO_CONTROL] = 0x201c, + [IPROC_PCIE_IMAP1_RO_CONTROL] = 0x2020, + [IPROC_PCIE_IMAP2_RO_CONTROL] = 0x2024, + [IPROC_PCIE_IMAP3_RO_CONTROL] = 0x2028, + [IPROC_PCIE_IMAP4_RO_CONTROL] = 0x202c, +}; + +/* iProc PCIe PAXC v2 registers */ +static const u16 iproc_pcie_reg_paxc_v2[] = { + [IPROC_PCIE_MSI_GIC_MODE] = 0x050, + [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, + [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, + [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, + [IPROC_PCIE_MSI_ADDR_HI] = 0x080, + [IPROC_PCIE_MSI_EN_CFG] = 0x09c, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + 
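/*
 * The CFG_IND_ADDR/CFG_IND_DATA pair provides indirect access to the
 * root complex's own configuration space: the dword-aligned register
 * offset is written to CFG_IND_ADDR and the data is then accessed
 * through CFG_IND_DATA, as done for bus 0 in iproc_pcie_map_ep_cfg_reg()
 * below.
 */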
[IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, +}; + +/** + * List of device IDs of controllers that have corrupted + * capability list that require SW fixup + */ +static const u16 iproc_pcie_corrupt_cap_did[] = { + 0x16cd, + 0x16f0, + 0xd802, + 0xd804 +}; + +enum iproc_pcie_type { + IPROC_PCIE_PAXB_V2, + IPROC_PCIE_PAXC, + IPROC_PCIE_PAXC_V2, +}; + +/** + * struct iproc_pcie_ob - iProc PCIe outbound mapping + * + * @axi_offset: offset from the AXI address to the internal address used by + * the iProc PCIe core + * @nr_windows: total number of supported outbound mapping windows + */ +struct iproc_pcie_ob { + resource_size_t axi_offset; + unsigned int nr_windows; +}; + +/** + * struct iproc_pcie_ib - iProc PCIe inbound mapping + * + * @nr_regions: total number of supported inbound mapping regions + */ +struct iproc_pcie_ib { + unsigned int nr_regions; +}; + +/** + * struct iproc_pcie_ob_map - outbound mapping controller specific parameters + * + * @window_sizes: list of supported outbound mapping window sizes in MB + * @nr_sizes: number of supported outbound mapping window sizes + */ +struct iproc_pcie_ob_map { + resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES]; + unsigned int nr_sizes; +}; + +static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = { + { + /* OARR0/OMAP0 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR1/OMAP1 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR2/OMAP2 */ + .window_sizes = { 128, 256, 512, 1024 }, + .nr_sizes = 4, + }, + { + /* OARR3/OMAP3 */ + .window_sizes = { 128, 256, 512, 1024 }, + .nr_sizes = 4, + }, +}; + +/** + * iProc PCIe inbound mapping type + */ +enum iproc_pcie_ib_map_type { + /* for DDR memory */ + IPROC_PCIE_IB_MAP_MEM = 0, + + /* for device I/O memory */ + IPROC_PCIE_IB_MAP_IO, + + /* invalid or unused */ + IPROC_PCIE_IB_MAP_INVALID +}; + +/** + * struct iproc_pcie_ib_map - inbound mapping controller specific parameters + * + * @type: inbound mapping region type + * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or SZ_1G + * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or + * GB, depedning on the size unit + * @nr_sizes: number of supported inbound mapping region sizes + * @nr_windows: number of supported inbound mapping windows for the region + * @imap_addr_offset: register offset between the upper and lower 32-bit + * IMAP address registers + * @imap_window_offset: register offset between each IMAP window + */ +struct iproc_pcie_ib_map { + enum iproc_pcie_ib_map_type type; + unsigned int size_unit; + resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES]; + unsigned int nr_sizes; + unsigned int nr_windows; + u16 imap_addr_offset; + u16 imap_window_offset; +}; + +static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = { + { + /* IARR0/IMAP0 */ + .type = IPROC_PCIE_IB_MAP_IO, + .size_unit = SZ_1K, + .region_sizes = { 32 }, + .nr_sizes = 1, + .nr_windows = 8, + .imap_addr_offset = 0x40, + .imap_window_offset = 0x4, + }, + { + /* IARR1/IMAP1 (currently unused) */ + .type = IPROC_PCIE_IB_MAP_INVALID, + }, + { + /* IARR2/IMAP2 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1M, + .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192, + 16384 }, + .nr_sizes = 9, + .nr_windows = 1, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, + { + /* IARR3/IMAP3 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1G, + .region_sizes = { 1, 2, 4, 8, 16, 32 }, + .nr_sizes = 6, + 
.nr_windows = 8, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, + { + /* IARR4/IMAP4 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1G, + .region_sizes = { 32, 64, 128, 256, 512 }, + .nr_sizes = 5, + .nr_windows = 8, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, +}; + +/** + * struct iproc_pcie - iproc pcie device instance + * + * @dev: pointer to pcie udevice + * @base: device I/O base address + * @type: pci device type, PAXC or PAXB + * @reg_offsets: pointer to pcie host register + * @fix_paxc_cap: paxc capability + * @need_ob_cfg: outbound mapping status + * @ob: pcie outbound mapping + * @ob_map: pointer to outbound mapping parameters + * @need_ib_cfg: inbound mapping status + * @ib: pcie inbound mapping + * @ib_map: pointer to inbound mapping parameters + * @ep_is_internal: ep status + * @phy: phy device + * @link_is_active: link up status + * @has_apb_err_disable: apb error status + */ +struct iproc_pcie { + struct udevice *dev; + void __iomem *base; + enum iproc_pcie_type type; + u16 *reg_offsets; + bool fix_paxc_cap; + bool need_ob_cfg; + struct iproc_pcie_ob ob; + const struct iproc_pcie_ob_map *ob_map; + bool need_ib_cfg; + struct iproc_pcie_ib ib; + const struct iproc_pcie_ib_map *ib_map; + bool ep_is_internal; + struct phy phy; + bool link_is_active; + bool has_apb_err_disable; +}; + +static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset) +{ + return !!(reg_offset == IPROC_PCIE_REG_INVALID); +} + +static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg) +{ + return pcie->reg_offsets[reg]; +} + +static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg) +{ + u16 offset = iproc_pcie_reg_offset(pcie, reg); + + if (iproc_pcie_reg_is_invalid(offset)) + return 0; + + return readl(pcie->base + offset); +} + +static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg, u32 val) +{ + u16 offset = iproc_pcie_reg_offset(pcie, reg); + + if (iproc_pcie_reg_is_invalid(offset)) + return; + + writel(val, pcie->base + offset); +} + +static int iproc_pcie_map_ep_cfg_reg(const struct udevice *udev, pci_dev_t bdf, + uint where, void **paddress) +{ + struct iproc_pcie *pcie = dev_get_priv(udev); + unsigned int busno = PCI_BUS(bdf); + unsigned int slot = PCI_DEV(bdf); + unsigned int fn = PCI_FUNC(bdf); + + u16 offset; + u32 val; + + /* root complex access */ + if (busno == 0) { + if (slot > 0 || fn > 0) + return -ENODEV; + + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, + where & CFG_IND_ADDR_MASK); + offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); + if (iproc_pcie_reg_is_invalid(offset)) + return -ENODEV; + + *paddress = (pcie->base + offset); + return 0; + } + + if (!pcie->link_is_active) + return -ENODEV; + + /* EP device access */ + val = (PCIE_ECAM_OFFSET(busno, slot, fn, where) & CFG_ADDR_CFG_ECAM_MASK) + | (1 & CFG_ADDR_CFG_TYPE_MASK); + + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); + offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); + + if (iproc_pcie_reg_is_invalid(offset)) + return -ENODEV; + + *paddress = (pcie->base + offset); + + return 0; +} + +static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, ulong *val) +{ + u32 i, dev_id; + + switch (where & ~0x3) { + case PCI_VENDOR_ID: + dev_id = *val >> 16; + + /* + * Activate fixup for those controllers that have corrupted + * capability list registers + */ + for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++) + if (dev_id == 
iproc_pcie_corrupt_cap_did[i]) + pcie->fix_paxc_cap = true; + break; + + case IPROC_PCI_PM_CAP: + if (pcie->fix_paxc_cap) { + /* advertise PM, force next capability to PCIe */ + *val &= ~IPROC_PCI_PM_CAP_MASK; + *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM; + } + break; + + case IPROC_PCI_EXP_CAP: + if (pcie->fix_paxc_cap) { + /* advertise root port, version 2, terminate here */ + *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 | + PCI_CAP_ID_EXP; + } + break; + + case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL: + /* Don't advertise CRS SV support */ + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + break; + + default: + break; + } +} + +static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, + unsigned int devfn, int where, + int size, u32 *val) +{ + void __iomem *addr; + int ret; + + ret = iproc_pcie_map_ep_cfg_reg(pcie->dev, devfn, where & ~0x3, &addr); + if (ret) { + *val = ~0; + return -EINVAL; + } + + *val = readl(addr); + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return 0; +} + +static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, + unsigned int devfn, int where, + int size, u32 val) +{ + void __iomem *addr; + int ret; + u32 mask, tmp; + + ret = iproc_pcie_map_ep_cfg_reg(pcie->dev, devfn, where & ~0x3, &addr); + if (ret) + return -EINVAL; + + if (size == 4) { + writel(val, addr); + return 0; + } + + mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); + tmp = readl(addr) & mask; + tmp |= val << ((where & 0x3) * 8); + writel(tmp, addr); + return 0; +} + +/** + * iproc_pcie_apb_err_disable() - configure apb error + * + * APB error forwarding can be disabled during access of configuration + * registers of the endpoint device, to prevent unsupported requests + * (typically seen during enumeration with multi-function devices) from + * triggering a system exception. 
+ * + * @bus: pcie udevice + * @bdf: pdf value + * @disabled: flag to enable/disabled apb error + */ +static inline void iproc_pcie_apb_err_disable(const struct udevice *bus, + pci_dev_t bdf, bool disable) +{ + struct iproc_pcie *pcie = dev_get_priv(bus); + u32 val; + + if (PCI_BUS(bdf) && pcie->has_apb_err_disable) { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN); + if (disable) + val &= ~APB_ERR_EN; + else + val |= APB_ERR_EN; + iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val); + } +} + +static int iproc_pcie_config_read32(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct iproc_pcie *pcie = dev_get_priv(bus); + int ret; + ulong data; + + iproc_pcie_apb_err_disable(bus, bdf, true); + ret = pci_generic_mmap_read_config(bus, iproc_pcie_map_ep_cfg_reg, + bdf, offset, &data, PCI_SIZE_32); + iproc_pcie_apb_err_disable(bus, bdf, false); + if (size <= PCI_SIZE_16) + *valuep = (data >> (8 * (offset & 3))) & + ((1 << (BIT(size) * 8)) - 1); + else + *valuep = data; + + if (!ret && PCI_BUS(bdf) == 0) + iproc_pcie_fix_cap(pcie, offset, valuep); + + return ret; +} + +static int iproc_pcie_config_write32(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + void *addr; + ulong mask, tmp; + int ret; + + ret = iproc_pcie_map_ep_cfg_reg(bus, bdf, offset, &addr); + if (ret) + return ret; + + if (size == PCI_SIZE_32) { + writel(value, addr); + return ret; + } + + iproc_pcie_apb_err_disable(bus, bdf, true); + mask = ~(((1 << (BIT(size) * 8)) - 1) << ((offset & 0x3) * 8)); + tmp = readl(addr) & mask; + tmp |= (value << ((offset & 0x3) * 8)); + writel(tmp, addr); + iproc_pcie_apb_err_disable(bus, bdf, false); + + return ret; +} + +const static struct dm_pci_ops iproc_pcie_ops = { + .read_config = iproc_pcie_config_read32, + .write_config = iproc_pcie_config_write32, +}; + +static int iproc_pcie_rev_init(struct iproc_pcie *pcie) +{ + unsigned int reg_idx; + const u16 *regs; + u16 num_elements; + + switch (pcie->type) { + case IPROC_PCIE_PAXC_V2: + pcie->ep_is_internal = true; + regs = iproc_pcie_reg_paxc_v2; + num_elements = ARRAY_SIZE(iproc_pcie_reg_paxc_v2); + break; + case IPROC_PCIE_PAXB_V2: + regs = iproc_pcie_reg_paxb_v2; + num_elements = ARRAY_SIZE(iproc_pcie_reg_paxb_v2); + pcie->has_apb_err_disable = true; + if (pcie->need_ob_cfg) { + pcie->ob.axi_offset = 0; + pcie->ob_map = paxb_v2_ob_map; + pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map); + } + pcie->need_ib_cfg = true; + pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); + pcie->ib_map = paxb_v2_ib_map; + break; + default: + dev_dbg(pcie->dev, "incompatible iProc PCIe interface\n"); + return -EINVAL; + } + + pcie->reg_offsets = calloc(IPROC_PCIE_MAX_NUM_REG, + sizeof(*pcie->reg_offsets)); + if (!pcie->reg_offsets) + return -ENOMEM; + + /* go through the register table and populate all valid registers */ + pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ? + IPROC_PCIE_REG_INVALID : regs[0]; + for (reg_idx = 1; reg_idx < num_elements; reg_idx++) + pcie->reg_offsets[reg_idx] = regs[reg_idx] ? 
+ regs[reg_idx] : IPROC_PCIE_REG_INVALID; + + return 0; +} + +static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, + int window_idx) +{ + u32 val; + + val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); + + return !!(val & OARR_VALID); +} + +static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, + int size_idx, u64 axi_addr, u64 pci_addr) +{ + u16 oarr_offset, omap_offset; + + /* + * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based + * on window index. + */ + oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0, + window_idx)); + omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0, + window_idx)); + if (iproc_pcie_reg_is_invalid(oarr_offset) || + iproc_pcie_reg_is_invalid(omap_offset)) + return -EINVAL; + + /* + * Program the OARR registers. The upper 32-bit OARR register is + * always right after the lower 32-bit OARR register. + */ + writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) | + OARR_VALID, pcie->base + oarr_offset); + writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4); + + /* now program the OMAP registers */ + writel(lower_32_bits(pci_addr), pcie->base + omap_offset); + writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4); + + debug("ob window [%d]: offset 0x%x axi %pap pci %pap\n", + window_idx, oarr_offset, &axi_addr, &pci_addr); + debug("oarr lo 0x%x oarr hi 0x%x\n", + readl(pcie->base + oarr_offset), + readl(pcie->base + oarr_offset + 4)); + debug("omap lo 0x%x omap hi 0x%x\n", + readl(pcie->base + omap_offset), + readl(pcie->base + omap_offset + 4)); + + return 0; +} + +/** + * iproc_pcie_setup_ob() - setup outbound address mapping + * + * Some iProc SoCs require the SW to configure the outbound address mapping + * Outbound address translation: + * + * iproc_pcie_address = axi_address - axi_offset + * OARR = iproc_pcie_address + * OMAP = pci_addr + * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address + * + * @pcie: pcie device + * @axi_addr: axi address to be translated + * @pci_addr: pci address + * @size: window size + * + * @return: 0 on success and -ve on failure + */ +static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, + u64 pci_addr, resource_size_t size) +{ + struct iproc_pcie_ob *ob = &pcie->ob; + int ret = -EINVAL, window_idx, size_idx; + + if (axi_addr < ob->axi_offset) { + pr_err("axi address %pap less than offset %pap\n", + &axi_addr, &ob->axi_offset); + return -EINVAL; + } + + /* + * Translate the AXI address to the internal address used by the iProc + * PCIe core before programming the OARR + */ + axi_addr -= ob->axi_offset; + + /* iterate through all OARR/OMAP mapping windows */ + for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { + const struct iproc_pcie_ob_map *ob_map = + &pcie->ob_map[window_idx]; + + /* + * If current outbound window is already in use, move on to the + * next one. + */ + if (iproc_pcie_ob_is_valid(pcie, window_idx)) + continue; + + /* + * Iterate through all supported window sizes within the + * OARR/OMAP pair to find a match. Go through the window sizes + * in a descending order. + */ + for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0; + size_idx--) { + resource_size_t window_size = + ob_map->window_sizes[size_idx] * SZ_1M; + + /* + * Keep iterating until we reach the last window and + * with the minimal window size at index zero. 
In this + * case, we take a compromise by mapping it using the + * minimum window size that can be supported + */ + if (size < window_size) { + if (size_idx > 0 || window_idx > 0) + continue; + + /* + * For the corner case of reaching the minimal + * window size that can be supported on the + * last window + */ + axi_addr = ALIGN_DOWN(axi_addr, window_size); + pci_addr = ALIGN_DOWN(pci_addr, window_size); + size = window_size; + } + + if (!IS_ALIGNED(axi_addr, window_size) || + !IS_ALIGNED(pci_addr, window_size)) { + pr_err("axi %pap or pci %pap not aligned\n", + &axi_addr, &pci_addr); + return -EINVAL; + } + + /* + * Match found! Program both OARR and OMAP and mark + * them as a valid entry. + */ + ret = iproc_pcie_ob_write(pcie, window_idx, size_idx, + axi_addr, pci_addr); + if (ret) + goto err_ob; + + size -= window_size; + if (size == 0) + return 0; + + /* + * If we are here, we are done with the current window, + * but not yet finished all mappings. Need to move on + * to the next window. + */ + axi_addr += window_size; + pci_addr += window_size; + break; + } + } + +err_ob: + pr_err("unable to configure outbound mapping\n"); + pr_err("axi %pap, axi offset %pap, pci %pap, res size %pap\n", + &axi_addr, &ob->axi_offset, &pci_addr, &size); + + return ret; +} + +static int iproc_pcie_map_ranges(struct udevice *dev) +{ + struct iproc_pcie *pcie = dev_get_priv(dev); + struct udevice *bus = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(bus); + int i, ret; + + for (i = 0; i < hose->region_count; i++) { + if (hose->regions[i].flags == PCI_REGION_MEM || + hose->regions[i].flags == PCI_REGION_PREFETCH) { + debug("%d: bus_addr %p, axi_addr %p, size 0x%llx\n", + i, &hose->regions[i].bus_start, + &hose->regions[i].phys_start, + hose->regions[i].size); + ret = iproc_pcie_setup_ob(pcie, + hose->regions[i].phys_start, + hose->regions[i].bus_start, + hose->regions[i].size); + if (ret) + return ret; + } + } + + return 0; +} + +static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, + int region_idx) +{ + const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; + u32 val; + + val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); + + return !!(val & (BIT(ib_map->nr_sizes) - 1)); +} + +static inline bool +iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, + enum iproc_pcie_ib_map_type type) +{ + return !!(ib_map->type == type); +} + +static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, + int size_idx, int nr_windows, u64 axi_addr, + u64 pci_addr, resource_size_t size) +{ + const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; + u16 iarr_offset, imap_offset; + u32 val; + int window_idx; + + iarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_IARR0, + region_idx)); + imap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_IMAP0, + region_idx)); + if (iproc_pcie_reg_is_invalid(iarr_offset) || + iproc_pcie_reg_is_invalid(imap_offset)) + return -EINVAL; + + debug("ib region [%d]: offset 0x%x axi %pap pci %pap\n", + region_idx, iarr_offset, &axi_addr, &pci_addr); + + /* + * Program the IARR registers. The upper 32-bit IARR register is + * always right after the lower 32-bit IARR register. 
+ */ + writel(lower_32_bits(pci_addr) | BIT(size_idx), + pcie->base + iarr_offset); + writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4); + + debug("iarr lo 0x%x iarr hi 0x%x\n", + readl(pcie->base + iarr_offset), + readl(pcie->base + iarr_offset + 4)); + + /* + * Now program the IMAP registers. Each IARR region may have one or + * more IMAP windows. + */ + size >>= ilog2(nr_windows); + for (window_idx = 0; window_idx < nr_windows; window_idx++) { + val = readl(pcie->base + imap_offset); + val |= lower_32_bits(axi_addr) | IMAP_VALID; + writel(val, pcie->base + imap_offset); + writel(upper_32_bits(axi_addr), + pcie->base + imap_offset + ib_map->imap_addr_offset); + + debug("imap window [%d] lo 0x%x hi 0x%x\n", + window_idx, readl(pcie->base + imap_offset), + readl(pcie->base + imap_offset + + ib_map->imap_addr_offset)); + + imap_offset += ib_map->imap_window_offset; + axi_addr += size; + } + + return 0; +} + +/** + * iproc_pcie_setup_ib() - setup inbound address mapping + * + * @pcie: pcie device + * @axi_addr: axi address to be translated + * @pci_addr: pci address + * @size: window size + * @type: inbound mapping type + * + * @return: 0 on success and -ve on failure + */ +static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, u64 axi_addr, + u64 pci_addr, resource_size_t size, + enum iproc_pcie_ib_map_type type) +{ + struct iproc_pcie_ib *ib = &pcie->ib; + int ret; + unsigned int region_idx, size_idx; + + /* iterate through all IARR mapping regions */ + for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { + const struct iproc_pcie_ib_map *ib_map = + &pcie->ib_map[region_idx]; + + /* + * If current inbound region is already in use or not a + * compatible type, move on to the next. + */ + if (iproc_pcie_ib_is_in_use(pcie, region_idx) || + !iproc_pcie_ib_check_type(ib_map, type)) + continue; + + /* iterate through all supported region sizes to find a match */ + for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { + resource_size_t region_size = + ib_map->region_sizes[size_idx] * ib_map->size_unit; + + if (size != region_size) + continue; + + if (!IS_ALIGNED(axi_addr, region_size) || + !IS_ALIGNED(pci_addr, region_size)) { + pr_err("axi %pap or pci %pap not aligned\n", + &axi_addr, &pci_addr); + return -EINVAL; + } + + /* Match found! Program IARR and all IMAP windows. 
*/ + ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, + ib_map->nr_windows, axi_addr, + pci_addr, size); + if (ret) + goto err_ib; + else + return 0; + } + } + ret = -EINVAL; + +err_ib: + pr_err("unable to configure inbound mapping\n"); + pr_err("axi %pap, pci %pap, res size %pap\n", + &axi_addr, &pci_addr, &size); + + return ret; +} + +static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) +{ + int ret; + struct pci_region regions; + int i = 0; + + while (!pci_get_dma_regions(pcie->dev, ®ions, i)) { + dev_dbg(pcie->dev, + "dma %d: bus_addr %#llx, axi_addr %#llx, size %#llx\n", + i, regions.bus_start, regions.phys_start, regions.size); + + /* Each range entry corresponds to an inbound mapping region */ + ret = iproc_pcie_setup_ib(pcie, regions.phys_start, + regions.bus_start, + regions.size, + IPROC_PCIE_IB_MAP_MEM); + if (ret) + return ret; + i++; + } + return 0; +} + +static void iproc_pcie_reset_map_regs(struct iproc_pcie *pcie) +{ + struct iproc_pcie_ib *ib = &pcie->ib; + struct iproc_pcie_ob *ob = &pcie->ob; + int window_idx, region_idx; + + if (pcie->ep_is_internal) + return; + + /* iterate through all OARR mapping regions */ + for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { + iproc_pcie_write_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, + window_idx), 0); + } + + /* iterate through all IARR mapping regions */ + for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { + iproc_pcie_write_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, + region_idx), 0); + } +} + +static void iproc_pcie_reset(struct iproc_pcie *pcie) +{ + u32 val; + + /* + * PAXC and the internal emulated endpoint device downstream should not + * be reset. If firmware has been loaded on the endpoint device at an + * earlier boot stage, reset here causes issues. + */ + if (pcie->ep_is_internal) + return; + + /* + * Select perst_b signal as reset source. Put the device into reset, + * and then bring it out of reset + */ + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); + val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & + ~RC_PCIE_RST_OUTPUT; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + udelay(250); + + val |= RC_PCIE_RST_OUTPUT; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + mdelay(100); +} + +static inline bool iproc_pcie_link_is_active(struct iproc_pcie *pcie) +{ + u32 val; + + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); + return !!((val & PCIE_PHYLINKUP) && (val & PCIE_DL_ACTIVE)); +} + +static int iproc_pcie_check_link(struct iproc_pcie *pcie) +{ + u32 link_status, class; + + pcie->link_is_active = false; + /* force class to PCI bridge Normal decode (0x060400) */ +#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c +#define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff + iproc_pci_raw_config_read32(pcie, 0, + PCI_BRIDGE_CTRL_REG_OFFSET, + 4, &class); + class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK; + class |= PCI_CLASS_BRIDGE_PCI_NORMAL; + iproc_pci_raw_config_write32(pcie, 0, + PCI_BRIDGE_CTRL_REG_OFFSET, + 4, class); + + /* + * PAXC connects to emulated endpoint devices directly and does not + * have a Serdes. Therefore skip the link detection logic here. 
+ */ + if (pcie->ep_is_internal) { + pcie->link_is_active = true; + return 0; + } + + if (!iproc_pcie_link_is_active(pcie)) { + pr_err("PHY or data link is INACTIVE!\n"); + return -ENODEV; + } + +#define PCI_TARGET_LINK_SPEED_MASK 0xf +#define PCI_TARGET_LINK_WIDTH_MASK 0x3f +#define PCI_TARGET_LINK_WIDTH_OFFSET 0x4 + + /* check link status to see if link is active */ + iproc_pci_raw_config_read32(pcie, 0, + IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, + 2, &link_status); + if (link_status & PCI_EXP_LNKSTA_NLW) + pcie->link_is_active = true; + + if (pcie->link_is_active) + pr_info("link UP @ Speed Gen-%d and width-x%d\n", + link_status & PCI_TARGET_LINK_SPEED_MASK, + (link_status >> PCI_TARGET_LINK_WIDTH_OFFSET) & + PCI_TARGET_LINK_WIDTH_MASK); + else + pr_info("link DOWN\n"); + + return 0; +} + +static int iproc_pcie_probe(struct udevice *dev) +{ + struct iproc_pcie *pcie = dev_get_priv(dev); + int ret; + + pcie->type = (enum iproc_pcie_type)dev_get_driver_data(dev); + debug("PAX type %d\n", pcie->type); + pcie->base = dev_read_addr_ptr(dev); + debug("PAX reg base %p\n", pcie->base); + + if (!pcie->base) + return -ENODEV; + + if (dev_read_bool(dev, "brcm,pcie-ob")) + pcie->need_ob_cfg = true; + + pcie->dev = dev; + ret = iproc_pcie_rev_init(pcie); + if (ret) + return ret; + + if (!pcie->ep_is_internal) { + ret = generic_phy_get_by_name(dev, "pcie-phy", &pcie->phy); + if (!ret) { + ret = generic_phy_init(&pcie->phy); + if (ret) { + pr_err("failed to init %s PHY\n", dev->name); + return ret; + } + + ret = generic_phy_power_on(&pcie->phy); + if (ret) { + pr_err("power on %s PHY failed\n", dev->name); + goto err_exit_phy; + } + } + } + + iproc_pcie_reset(pcie); + + if (pcie->need_ob_cfg) { + ret = iproc_pcie_map_ranges(dev); + if (ret) { + pr_err("outbound map failed\n"); + goto err_power_off_phy; + } + } + + if (pcie->need_ib_cfg) { + ret = iproc_pcie_map_dma_ranges(pcie); + if (ret) { + pr_err("inbound map failed\n"); + goto err_power_off_phy; + } + } + + if (iproc_pcie_check_link(pcie)) + pr_info("no PCIe EP device detected\n"); + + return 0; + +err_power_off_phy: + generic_phy_power_off(&pcie->phy); +err_exit_phy: + generic_phy_exit(&pcie->phy); + return ret; +} + +static int iproc_pcie_remove(struct udevice *dev) +{ + struct iproc_pcie *pcie = dev_get_priv(dev); + int ret; + + iproc_pcie_reset_map_regs(pcie); + + if (generic_phy_valid(&pcie->phy)) { + ret = generic_phy_power_off(&pcie->phy); + if (ret) { + pr_err("failed to power off PCIe phy\n"); + return ret; + } + + ret = generic_phy_exit(&pcie->phy); + if (ret) { + pr_err("failed to power off PCIe phy\n"); + return ret; + } + } + + return 0; +} + +static const struct udevice_id pci_iproc_ids[] = { + { .compatible = "brcm,iproc-pcie-paxb-v2", + .data = IPROC_PCIE_PAXB_V2 }, + { .compatible = "brcm,iproc-pcie-paxc-v2", + .data = IPROC_PCIE_PAXC_V2 }, + { } +}; + +U_BOOT_DRIVER(pci_iproc) = { + .name = "pci_iproc", + .id = UCLASS_PCI, + .of_match = pci_iproc_ids, + .ops = &iproc_pcie_ops, + .probe = iproc_pcie_probe, + .remove = iproc_pcie_remove, + .priv_auto = sizeof(struct iproc_pcie), + .flags = DM_FLAG_OS_PREPARE, +}; diff --git a/drivers/pci/pcie_layerscape.c b/drivers/pci/pcie_layerscape.c new file mode 100644 index 00000000000..1be33095b9c --- /dev/null +++ b/drivers/pci/pcie_layerscape.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2017-2020 NXP + * Copyright 2014-2015 Freescale Semiconductor, Inc. 
+ * Layerscape PCIe driver + */ + +#include <log.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <errno.h> +#include <malloc.h> +#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \ + defined(CONFIG_ARM) +#include <asm/arch/clock.h> +#endif +#include "pcie_layerscape.h" + +DECLARE_GLOBAL_DATA_PTR; + +LIST_HEAD(ls_pcie_list); + +unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset) +{ + return in_le32(pcie->dbi + offset); +} + +void dbi_writel(struct ls_pcie *pcie, unsigned int value, unsigned int offset) +{ + out_le32(pcie->dbi + offset, value); +} + +unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset) +{ + if (pcie->big_endian) + return in_be32(pcie->ctrl + offset); + else + return in_le32(pcie->ctrl + offset); +} + +void ctrl_writel(struct ls_pcie *pcie, unsigned int value, + unsigned int offset) +{ + if (pcie->big_endian) + out_be32(pcie->ctrl + offset, value); + else + out_le32(pcie->ctrl + offset, value); +} + +void ls_pcie_dbi_ro_wr_en(struct ls_pcie *pcie) +{ + u32 reg, val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dbi_readl(pcie, reg); + val |= PCIE_DBI_RO_WR_EN; + dbi_writel(pcie, val, reg); +} + +void ls_pcie_dbi_ro_wr_dis(struct ls_pcie *pcie) +{ + u32 reg, val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dbi_readl(pcie, reg); + val &= ~PCIE_DBI_RO_WR_EN; + dbi_writel(pcie, val, reg); +} + +static int ls_pcie_ltssm(struct ls_pcie *pcie) +{ + u32 state; + uint svr; + + svr = get_svr(); + if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) { + state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx)); + state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; + } else { + state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK; + } + + return state; +} + +int ls_pcie_link_up(struct ls_pcie *pcie) +{ + int ltssm; + + ltssm = ls_pcie_ltssm(pcie); + if (ltssm < LTSSM_PCIE_L0) + return 0; + + return 1; +} + +void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type, + u64 phys, u64 bus_addr, u64 size) +{ + dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT); + dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE); + dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE); + dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT); + dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET); + dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET); + dbi_writel(pcie, type, PCIE_ATU_CR1); + dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +} + +/* Use bar match mode and MEM type as default */ +void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, u32 pf, u32 vf_flag, + int type, int idx, int bar, u64 phys) +{ + dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT); + dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET); + dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET); + dbi_writel(pcie, type | PCIE_ATU_FUNC_NUM(pf), PCIE_ATU_CR1); + dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE | + (vf_flag ? PCIE_ATU_FUNC_NUM_MATCH_EN : 0) | + (vf_flag ? 
PCIE_ATU_VFBAR_MATCH_MODE_EN : 0) | + PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2); +} + +void ls_pcie_dump_atu(struct ls_pcie *pcie, u32 win_num, u32 type) +{ + int win_idx; + + for (win_idx = 0; win_idx < win_num; win_idx++) { + dbi_writel(pcie, type | win_idx, PCIE_ATU_VIEWPORT); + debug("iATU%d:\n", win_idx); + debug("\tLOWER PHYS 0x%08x\n", + dbi_readl(pcie, PCIE_ATU_LOWER_BASE)); + debug("\tUPPER PHYS 0x%08x\n", + dbi_readl(pcie, PCIE_ATU_UPPER_BASE)); + if (type == PCIE_ATU_REGION_OUTBOUND) { + debug("\tLOWER BUS 0x%08x\n", + dbi_readl(pcie, PCIE_ATU_LOWER_TARGET)); + debug("\tUPPER BUS 0x%08x\n", + dbi_readl(pcie, PCIE_ATU_UPPER_TARGET)); + debug("\tLIMIT 0x%08x\n", + dbi_readl(pcie, PCIE_ATU_LIMIT)); + } + debug("\tCR1 0x%08x\n", + dbi_readl(pcie, PCIE_ATU_CR1)); + debug("\tCR2 0x%08x\n", + dbi_readl(pcie, PCIE_ATU_CR2)); + } +} diff --git a/drivers/pci/pcie_layerscape.h b/drivers/pci/pcie_layerscape.h new file mode 100644 index 00000000000..b7f692f6450 --- /dev/null +++ b/drivers/pci/pcie_layerscape.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2017-2020 NXP + * Copyright 2014-2015 Freescale Semiconductor, Inc. + * Layerscape PCIe driver + */ + +#ifndef _PCIE_LAYERSCAPE_H_ +#define _PCIE_LAYERSCAPE_H_ +#include <pci.h> + +#include <linux/sizes.h> +#include <asm/arch-fsl-layerscape/svr.h> +#include <asm/arch-ls102xa/svr.h> + +#ifndef CFG_SYS_PCI_MEMORY_BUS +#define CFG_SYS_PCI_MEMORY_BUS CFG_SYS_SDRAM_BASE +#endif + +#ifndef CFG_SYS_PCI_MEMORY_PHYS +#define CFG_SYS_PCI_MEMORY_PHYS CFG_SYS_SDRAM_BASE +#endif + +#ifndef CFG_SYS_PCI_MEMORY_SIZE +#define CFG_SYS_PCI_MEMORY_SIZE SZ_4G +#endif + +#ifndef CFG_SYS_PCI_EP_MEMORY_BASE +#define CFG_SYS_PCI_EP_MEMORY_BASE CONFIG_SYS_LOAD_ADDR +#endif + +#define PCIE_PHYS_SIZE 0x200000000 +#define LS2088A_PCIE_PHYS_SIZE 0x800000000 +#define LS2088A_PCIE1_PHYS_ADDR 0x2000000000 + +/* iATU registers */ +#define PCIE_ATU_VIEWPORT 0x900 +#define PCIE_ATU_REGION_INBOUND (0x1 << 31) +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) +#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) +#define PCIE_ATU_REGION_INDEX3 (0x3 << 0) +#define PCIE_ATU_REGION_NUM 6 +#define PCIE_ATU_REGION_NUM_SRIOV 24 +#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_TYPE_MEM (0x0 << 0) +#define PCIE_ATU_TYPE_IO (0x2 << 0) +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20) +#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_ENABLE (0x1 << 31) +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19) +#define PCIE_ATU_VFBAR_MATCH_MODE_EN BIT(26) +#define PCIE_ATU_BAR_NUM(bar) ((bar) << 8) +#define PCIE_ATU_LOWER_BASE 0x90C +#define PCIE_ATU_UPPER_BASE 0x910 +#define PCIE_ATU_LIMIT 0x914 +#define PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) +#define PCIE_ATU_UPPER_TARGET 0x91C + +/* DBI registers */ +#define PCIE_SRIOV 0x178 +#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ +#define PCIE_DBI_RO_WR_EN BIT(0) +#define PCIE_MISC_CONTROL_1_OFF 0x8BC + +#define PCIE_LINK_CAP 0x7c +#define PCIE_LINK_SPEED_MASK 0xf +#define PCIE_LINK_WIDTH_MASK 0x3f0 +#define PCIE_LINK_STA 0x82 + +#define LTSSM_STATE_MASK 0x3f +#define LTSSM_PCIE_L0 0x11 /* L0 state */ + +#define PCIE_DBI_SIZE 0x100000 /* 1M */ + +#define PCIE_LCTRL0_CFG2_ENABLE (1 << 31) +#define 
PCIE_LCTRL0_VF(vf) ((vf) << 22) +#define PCIE_LCTRL0_PF(pf) ((pf) << 16) +#define PCIE_LCTRL0_VF_ACTIVE (1 << 21) +#define PCIE_LCTRL0_VAL(pf, vf) (PCIE_LCTRL0_PF(pf) | \ + PCIE_LCTRL0_VF(vf) | \ + ((vf) == 0 ? 0 : PCIE_LCTRL0_VF_ACTIVE) | \ + PCIE_LCTRL0_CFG2_ENABLE) + +#define PCIE_NO_SRIOV_BAR_BASE 0x1000 +#define FSL_PCIE_EP_MIN_APERTURE 4096 /* 4 Kbytes */ +#define PCIE_PF_NUM 2 +#define PCIE_VF_NUM 64 +#define BAR_NUM 8 + +#define PCIE_BAR0_SIZE SZ_4K +#define PCIE_BAR1_SIZE SZ_8K +#define PCIE_BAR2_SIZE SZ_4K +#define PCIE_BAR4_SIZE SZ_1M + +#define PCIE_SRIOV_VFBAR0 0x19C + +#define PCIE_MASK_OFFSET(flag, pf, off) ((flag) ? 0 : (0x1000 + (off) * (pf))) + +/* LUT registers */ +#define PCIE_LUT_UDR(n) (0x800 + (n) * 8) +#define PCIE_LUT_LDR(n) (0x804 + (n) * 8) +#define PCIE_LUT_ENABLE (1 << 31) +#define PCIE_LUT_ENTRY_COUNT 32 + +/* PF Controll registers */ +#define PCIE_PF_CONFIG 0x14 +#define PCIE_PF_VF_CTRL 0x7F8 +#define PCIE_PF_DBG 0x7FC +#define PCIE_CONFIG_READY (1 << 0) + +#define PCIE_SRDS_PRTCL(idx) (PCIE1 + (idx)) +#define PCIE_SYS_BASE_ADDR 0x3400000 +#define PCIE_CCSR_SIZE 0x0100000 + +/* CS2 */ +#define PCIE_CS2_OFFSET 0x1000 /* For PCIe without SR-IOV */ + +/* LS1021a PCIE space */ +#define LS1021_PCIE_SPACE_OFFSET 0x4000000000ULL +#define LS1021_PCIE_SPACE_SIZE 0x0800000000ULL + +/* LS1021a PEX1/2 Misc Ports Status Register */ +#define LS1021_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) +#define LS1021_LTSSM_STATE_SHIFT 20 + +/* LX2160a PF1 offset */ +#define LX2160_PCIE_PF1_OFFSET 0x8000 + +/* layerscape PF1 offset */ +#define LS_PCIE_PF1_OFFSET 0x20000 + +struct ls_pcie { + void __iomem *dbi; + void __iomem *lut; + void __iomem *ctrl; + int idx; + bool big_endian; + int mode; +}; + +struct ls_pcie_rc { + struct ls_pcie *pcie; + struct list_head list; + struct udevice *bus; + struct fdt_resource dbi_res; + struct fdt_resource lut_res; + struct fdt_resource ctrl_res; + struct fdt_resource cfg_res; + void __iomem *cfg0; + void __iomem *cfg1; + bool enabled; + int next_lut_index; + int stream_id_cur; +}; + +struct ls_pcie_ep { + struct fdt_resource addr_res; + struct ls_pcie *pcie; + struct udevice *bus; + void __iomem *addr; + u32 cfg2_flag; + u32 sriov_flag; + u32 pf1_offset; + u32 num_ib_wins; + u32 num_ob_wins; + u8 max_functions; +}; + +extern struct list_head ls_pcie_list; + +unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset); +void dbi_writel(struct ls_pcie *pcie, unsigned int value, unsigned int offset); +unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset); +void ctrl_writel(struct ls_pcie *pcie, unsigned int value, unsigned int offset); +void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type, + u64 phys, u64 bus_addr, u64 size); +void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, u32 pf, u32 vf_flag, + int type, int idx, int bar, u64 phys); +void ls_pcie_dump_atu(struct ls_pcie *pcie, u32 win_num, u32 type); +int ls_pcie_link_up(struct ls_pcie *pcie); +void ls_pcie_dbi_ro_wr_en(struct ls_pcie *pcie); +void ls_pcie_dbi_ro_wr_dis(struct ls_pcie *pcie); + +#endif /* _PCIE_LAYERSCAPE_H_ */ diff --git a/drivers/pci/pcie_layerscape_ep.c b/drivers/pci/pcie_layerscape_ep.c new file mode 100644 index 00000000000..3520488b345 --- /dev/null +++ b/drivers/pci/pcie_layerscape_ep.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2020 NXP + * Layerscape PCIe EP driver + */ + +#include <config.h> +#include <asm/arch/fsl_serdes.h> +#include <dm.h> +#include <asm/global_data.h> +#include <dm/devres.h> 
+#include <errno.h> +#include <pci_ep.h> +#include <asm/io.h> +#include <linux/sizes.h> +#include <linux/log2.h> +#include "pcie_layerscape.h" + +DECLARE_GLOBAL_DATA_PTR; + +static void ls_pcie_ep_enable_cfg(struct ls_pcie_ep *pcie_ep) +{ + struct ls_pcie *pcie = pcie_ep->pcie; + u32 config; + + config = ctrl_readl(pcie, PCIE_PF_CONFIG); + config |= PCIE_CONFIG_READY; + ctrl_writel(pcie, config, PCIE_PF_CONFIG); +} + +static int ls_ep_set_bar(struct udevice *dev, uint fn, struct pci_bar *ep_bar) +{ + struct ls_pcie_ep *pcie_ep = dev_get_priv(dev); + struct ls_pcie *pcie = pcie_ep->pcie; + dma_addr_t bar_phys = ep_bar->phys_addr; + enum pci_barno bar = ep_bar->barno; + u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + int flags = ep_bar->flags; + int type, idx; + u64 size; + + idx = bar; + /* BAR size is 2^(aperture + 11) */ + size = max_t(size_t, ep_bar->size, FSL_PCIE_EP_MIN_APERTURE); + + if (!(flags & PCI_BASE_ADDRESS_SPACE)) + type = PCIE_ATU_TYPE_MEM; + else + type = PCIE_ATU_TYPE_IO; + + ls_pcie_atu_inbound_set(pcie, fn, 0, type, idx, bar, bar_phys); + + dbi_writel(pcie, lower_32_bits(size - 1), reg + PCIE_NO_SRIOV_BAR_BASE); + dbi_writel(pcie, flags, reg); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dbi_writel(pcie, upper_32_bits(size - 1), + reg + 4 + PCIE_NO_SRIOV_BAR_BASE); + dbi_writel(pcie, 0, reg + 4); + } + + return 0; +} + +static struct pci_ep_ops ls_pcie_ep_ops = { + .set_bar = ls_ep_set_bar, +}; + +static void ls_pcie_ep_setup_atu(struct ls_pcie_ep *pcie_ep, u32 pf) +{ + struct ls_pcie *pcie = pcie_ep->pcie; + u32 vf_flag = 0; + u64 phys = 0; + + phys = CFG_SYS_PCI_EP_MEMORY_BASE + pf * SZ_64M; + + phys = ALIGN(phys, PCIE_BAR0_SIZE); + /* ATU 0 : INBOUND : map BAR0 */ + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 0 + pf * BAR_NUM, 0, phys); + /* ATU 1 : INBOUND : map BAR1 */ + phys = ALIGN(phys + PCIE_BAR0_SIZE, PCIE_BAR1_SIZE); + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 1 + pf * BAR_NUM, 1, phys); + /* ATU 2 : INBOUND : map BAR2 */ + phys = ALIGN(phys + PCIE_BAR1_SIZE, PCIE_BAR2_SIZE); + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 2 + pf * BAR_NUM, 2, phys); + /* ATU 3 : INBOUND : map BAR2 */ + phys = ALIGN(phys + PCIE_BAR2_SIZE, PCIE_BAR4_SIZE); + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 3 + pf * BAR_NUM, 4, phys); + + if (pcie_ep->sriov_flag) { + vf_flag = 1; + /* ATU 4 : INBOUND : map BAR0 */ + phys = ALIGN(phys + PCIE_BAR4_SIZE, PCIE_BAR0_SIZE); + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 4 + pf * BAR_NUM, 0, phys); + /* ATU 5 : INBOUND : map BAR1 */ + phys = ALIGN(phys + PCIE_BAR0_SIZE * PCIE_VF_NUM, + PCIE_BAR1_SIZE); + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 5 + pf * BAR_NUM, 1, phys); + /* ATU 6 : INBOUND : map BAR2 */ + phys = ALIGN(phys + PCIE_BAR1_SIZE * PCIE_VF_NUM, + PCIE_BAR2_SIZE); + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 6 + pf * BAR_NUM, 2, phys); + /* ATU 7 : INBOUND : map BAR4 */ + phys = ALIGN(phys + PCIE_BAR2_SIZE * PCIE_VF_NUM, + PCIE_BAR4_SIZE); + ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM, + 7 + pf * BAR_NUM, 4, phys); + } + + /* ATU: OUTBOUND : map MEM */ + ls_pcie_atu_outbound_set(pcie, pf, PCIE_ATU_TYPE_MEM, + (u64)pcie_ep->addr_res.start + + pf * CFG_SYS_PCI_MEMORY_SIZE, + 0, CFG_SYS_PCI_MEMORY_SIZE); +} + +/* BAR0 and BAR1 are 32bit BAR2 and BAR4 are 64bit */ +static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size) +{ + u32 mask; + + /* The least inbound 
window is 4KiB */ + if (size < SZ_4K) + mask = 0; + else + mask = size - 1; + + switch (bar) { + case 0: + writel(mask, bar_base + PCI_BASE_ADDRESS_0); + break; + case 1: + writel(mask, bar_base + PCI_BASE_ADDRESS_1); + break; + case 2: + writel(mask, bar_base + PCI_BASE_ADDRESS_2); + writel(0, bar_base + PCI_BASE_ADDRESS_3); + break; + case 4: + writel(mask, bar_base + PCI_BASE_ADDRESS_4); + writel(0, bar_base + PCI_BASE_ADDRESS_5); + break; + default: + break; + } +} + +static void ls_pcie_ep_setup_bars(void *bar_base) +{ + /* BAR0 - 32bit - MEM */ + ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE); + /* BAR1 - 32bit - MEM*/ + ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE); + /* BAR2 - 64bit - MEM */ + ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE); + /* BAR4 - 64bit - MEM */ + ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE); +} + +static void ls_pcie_ep_setup_vf_bars(void *bar_base) +{ + /* VF BAR0 MASK register at offset 0x19c*/ + bar_base += PCIE_SRIOV_VFBAR0 - PCI_BASE_ADDRESS_0; + + /* VF-BAR0 - 32bit - MEM */ + ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE); + /* VF-BAR1 - 32bit - MEM*/ + ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE); + /* VF-BAR2 - 64bit - MEM */ + ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE); + /* VF-BAR4 - 64bit - MEM */ + ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE); +} + +static void ls_pcie_setup_ep(struct ls_pcie_ep *pcie_ep) +{ + u32 sriov; + u32 pf, vf; + void *bar_base = NULL; + struct ls_pcie *pcie = pcie_ep->pcie; + + sriov = readl(pcie->dbi + PCIE_SRIOV); + if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) { + pcie_ep->sriov_flag = 1; + for (pf = 0; pf < PCIE_PF_NUM; pf++) { + /* + * The VF_BARn_REG register's Prefetchable and Type bit + * fields are overwritten by a write to VF's BAR Mask + * register. Before writing to the VF_BARn_MASK_REG + * register, write 0b to the PCIE_MISC_CONTROL_1_OFF + * register. 
+ */ + writel(0, pcie->dbi + PCIE_MISC_CONTROL_1_OFF); + + bar_base = pcie->dbi + + PCIE_MASK_OFFSET(pcie_ep->cfg2_flag, pf, + pcie_ep->pf1_offset); + + if (pcie_ep->cfg2_flag) { + ctrl_writel(pcie, + PCIE_LCTRL0_VAL(pf, 0), + PCIE_PF_VF_CTRL); + ls_pcie_ep_setup_bars(bar_base); + + for (vf = 1; vf <= PCIE_VF_NUM; vf++) { + ctrl_writel(pcie, + PCIE_LCTRL0_VAL(pf, vf), + PCIE_PF_VF_CTRL); + ls_pcie_ep_setup_vf_bars(bar_base); + } + } else { + ls_pcie_ep_setup_bars(bar_base); + ls_pcie_ep_setup_vf_bars(bar_base); + } + + ls_pcie_ep_setup_atu(pcie_ep, pf); + } + + if (pcie_ep->cfg2_flag) /* Disable CFG2 */ + ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL); + } else { + ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE); + ls_pcie_ep_setup_atu(pcie_ep, 0); + } + + ls_pcie_dump_atu(pcie, PCIE_ATU_REGION_NUM_SRIOV, + PCIE_ATU_REGION_INBOUND); + + ls_pcie_ep_enable_cfg(pcie_ep); +} + +static int ls_pcie_ep_probe(struct udevice *dev) +{ + struct ls_pcie_ep *pcie_ep = dev_get_priv(dev); + struct ls_pcie *pcie; + u16 link_sta; + int ret; + u32 svr; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie_ep->pcie = pcie; + + pcie->dbi = devfdt_get_addr_index_ptr(dev, 0); + if (!pcie->dbi) + return -EINVAL; + + pcie->ctrl = devfdt_get_addr_index_ptr(dev, 1); + if (!pcie->ctrl) + return -EINVAL; + + ret = fdt_get_named_resource(gd->fdt_blob, dev_of_offset(dev), + "reg", "reg-names", + "addr_space", &pcie_ep->addr_res); + if (ret) { + printf("%s: resource \"addr_space\" not found\n", dev->name); + return ret; + } + + pcie->idx = ((unsigned long)pcie->dbi - PCIE_SYS_BASE_ADDR) / + PCIE_CCSR_SIZE; + + /* This controller is disabled by RCW */ + if (!is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx))) + return 0; + + pcie->big_endian = fdtdec_get_bool(gd->fdt_blob, dev_of_offset(dev), + "big-endian"); + + svr = SVR_SOC_VER(get_svr()); + + if (svr == SVR_LX2160A || svr == SVR_LX2162A || + svr == SVR_LX2120A || svr == SVR_LX2080A || + svr == SVR_LX2122A || svr == SVR_LX2082A) + pcie_ep->pf1_offset = LX2160_PCIE_PF1_OFFSET; + else + pcie_ep->pf1_offset = LS_PCIE_PF1_OFFSET; + + if (svr == SVR_LS2080A || svr == SVR_LS2085A) + pcie_ep->cfg2_flag = 1; + else + pcie_ep->cfg2_flag = 0; + + pcie->mode = readb(pcie->dbi + PCI_HEADER_TYPE) & 0x7f; + if (pcie->mode != PCI_HEADER_TYPE_NORMAL) + return 0; + + pcie_ep->max_functions = fdtdec_get_int(gd->fdt_blob, + dev_of_offset(dev), + "max-functions", 1); + pcie_ep->num_ib_wins = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), + "num-ib-windows", 8); + pcie_ep->num_ob_wins = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), + "num-ob-windows", 8); + + printf("PCIe%u: %s %s", PCIE_SRDS_PRTCL(pcie->idx), dev->name, + "Endpoint"); + ls_pcie_setup_ep(pcie_ep); + + if (!ls_pcie_link_up(pcie)) { + /* Let the user know there's no PCIe link */ + printf(": no link\n"); + return 0; + } + + /* Print the negotiated PCIe link width */ + link_sta = readw(pcie->dbi + PCIE_LINK_STA); + printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4, + link_sta & PCIE_LINK_SPEED_MASK); + + return 0; +} + +static int ls_pcie_ep_remove(struct udevice *dev) +{ + return 0; +} + +const struct udevice_id ls_pcie_ep_ids[] = { + { .compatible = "fsl,ls-pcie-ep" }, + { } +}; + +U_BOOT_DRIVER(pci_layerscape_ep) = { + .name = "pci_layerscape_ep", + .id = UCLASS_PCI_EP, + .of_match = ls_pcie_ep_ids, + .ops = &ls_pcie_ep_ops, + .probe = ls_pcie_ep_probe, + .remove = ls_pcie_ep_remove, + .priv_auto = sizeof(struct ls_pcie_ep), +}; diff --git 
a/drivers/pci/pcie_layerscape_fixup.c b/drivers/pci/pcie_layerscape_fixup.c new file mode 100644 index 00000000000..ec4a7e7b657 --- /dev/null +++ b/drivers/pci/pcie_layerscape_fixup.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2017-2021 NXP + * Copyright 2014-2015 Freescale Semiconductor, Inc. + * Layerscape PCIe driver + */ + +#include <dm.h> +#include <init.h> +#include <log.h> +#include <pci.h> +#include <asm/arch/fsl_serdes.h> +#include <asm/io.h> +#include <errno.h> +#ifdef CONFIG_OF_BOARD_SETUP +#include <linux/libfdt.h> +#include <fdt_support.h> +#ifdef CONFIG_ARM +#include <asm/arch/clock.h> +#endif +#include <malloc.h> +#include <env.h> +#include "pcie_layerscape.h" +#include "pcie_layerscape_fixup_common.h" + +int next_stream_id; + +static int fdt_pcie_get_nodeoffset(void *blob, struct ls_pcie_rc *pcie_rc) +{ + int nodeoffset; + uint svr; + char *compat = NULL; + + /* find pci controller node */ + nodeoffset = fdt_node_offset_by_compat_reg(blob, "fsl,ls-pcie", + pcie_rc->dbi_res.start); + if (nodeoffset < 0) { +#ifdef CONFIG_FSL_PCIE_COMPAT /* Compatible with older version of dts node */ + svr = (get_svr() >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; + if (svr == SVR_LS2088A || svr == SVR_LS2084A || + svr == SVR_LS2048A || svr == SVR_LS2044A || + svr == SVR_LS2081A || svr == SVR_LS2041A) + compat = "fsl,ls2088a-pcie"; + else + compat = CONFIG_FSL_PCIE_COMPAT; + + nodeoffset = + fdt_node_offset_by_compat_reg(blob, compat, + pcie_rc->dbi_res.start); +#endif + } + + return nodeoffset; +} + +#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2) +/* + * Return next available LUT index. + */ +static int ls_pcie_next_lut_index(struct ls_pcie_rc *pcie_rc) +{ + if (pcie_rc->next_lut_index < PCIE_LUT_ENTRY_COUNT) + return pcie_rc->next_lut_index++; + else + return -ENOSPC; /* LUT is full */ +} + +static void lut_writel(struct ls_pcie_rc *pcie_rc, unsigned int value, + unsigned int offset) +{ + struct ls_pcie *pcie = pcie_rc->pcie; + + if (pcie->big_endian) + out_be32(pcie->lut + offset, value); + else + out_le32(pcie->lut + offset, value); +} + +/* + * Program a single LUT entry + */ +static void ls_pcie_lut_set_mapping(struct ls_pcie_rc *pcie_rc, int index, + u32 devid, u32 streamid) +{ + /* leave mask as all zeroes, want to match all bits */ + lut_writel(pcie_rc, devid << 16, PCIE_LUT_UDR(index)); + lut_writel(pcie_rc, streamid | PCIE_LUT_ENABLE, PCIE_LUT_LDR(index)); +} + +/* + * An msi-map is a property to be added to the pci controller + * node. 
It is a table, where each entry consists of 4 fields + * e.g.: + * + * msi-map = <[devid] [phandle-to-msi-ctrl] [stream-id] [count] + * [devid] [phandle-to-msi-ctrl] [stream-id] [count]>; + */ +static void fdt_pcie_set_msi_map_entry_ls(void *blob, + struct ls_pcie_rc *pcie_rc, + u32 devid, u32 streamid) +{ + u32 *prop; + u32 phandle; + int nodeoffset; + uint svr; + char *compat = NULL; + struct ls_pcie *pcie = pcie_rc->pcie; + + /* find pci controller node */ + nodeoffset = fdt_node_offset_by_compat_reg(blob, "fsl,ls-pcie", + pcie_rc->dbi_res.start); + if (nodeoffset < 0) { +#ifdef CONFIG_FSL_PCIE_COMPAT /* Compatible with older version of dts node */ + svr = (get_svr() >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; + if (svr == SVR_LS2088A || svr == SVR_LS2084A || + svr == SVR_LS2048A || svr == SVR_LS2044A || + svr == SVR_LS2081A || svr == SVR_LS2041A) + compat = "fsl,ls2088a-pcie"; + else + compat = CONFIG_FSL_PCIE_COMPAT; + if (compat) + nodeoffset = fdt_node_offset_by_compat_reg(blob, + compat, pcie_rc->dbi_res.start); +#endif + if (nodeoffset < 0) + return; + } + + /* get phandle to MSI controller */ + prop = (u32 *)fdt_getprop(blob, nodeoffset, "msi-parent", 0); + if (prop == NULL) { + debug("\n%s: ERROR: missing msi-parent: PCIe%d\n", + __func__, pcie->idx); + return; + } + phandle = fdt32_to_cpu(*prop); + + /* set one msi-map row */ + fdt_appendprop_u32(blob, nodeoffset, "msi-map", devid); + fdt_appendprop_u32(blob, nodeoffset, "msi-map", phandle); + fdt_appendprop_u32(blob, nodeoffset, "msi-map", streamid); + fdt_appendprop_u32(blob, nodeoffset, "msi-map", 1); +} + +/* + * An iommu-map is a property to be added to the pci controller + * node. It is a table, where each entry consists of 4 fields + * e.g.: + * + * iommu-map = <[devid] [phandle-to-iommu-ctrl] [stream-id] [count] + * [devid] [phandle-to-iommu-ctrl] [stream-id] [count]>; + */ +static void fdt_pcie_set_iommu_map_entry_ls(void *blob, + struct ls_pcie_rc *pcie_rc, + u32 devid, u32 streamid) +{ + u32 *prop; + u32 iommu_map[4]; + int nodeoffset; + int lenp; + struct ls_pcie *pcie = pcie_rc->pcie; + + nodeoffset = fdt_pcie_get_nodeoffset(blob, pcie_rc); + if (nodeoffset < 0) + return; + + /* get phandle to iommu controller */ + prop = fdt_getprop_w(blob, nodeoffset, "iommu-map", &lenp); + if (prop == NULL) { + debug("\n%s: ERROR: missing iommu-map: PCIe%d\n", + __func__, pcie->idx); + return; + } + + /* set iommu-map row */ + iommu_map[0] = cpu_to_fdt32(devid); + iommu_map[1] = *++prop; + iommu_map[2] = cpu_to_fdt32(streamid); + iommu_map[3] = cpu_to_fdt32(1); + + if (devid == 0) { + fdt_setprop_inplace(blob, nodeoffset, "iommu-map", + iommu_map, 16); + } else { + fdt_appendprop(blob, nodeoffset, "iommu-map", iommu_map, 16); + } +} + +static int fdt_fixup_pcie_device_ls(void *blob, pci_dev_t bdf, + struct ls_pcie_rc *pcie_rc) +{ + int streamid, index; + + streamid = pcie_next_streamid(pcie_rc->stream_id_cur, + pcie_rc->pcie->idx); + if (streamid < 0) { + printf("ERROR: out of stream ids for BDF %d.%d.%d\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + return -ENOENT; + } + pcie_rc->stream_id_cur++; + + index = ls_pcie_next_lut_index(pcie_rc); + if (index < 0) { + printf("ERROR: out of LUT indexes for BDF %d.%d.%d\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + return -ENOENT; + } + + /* map PCI b.d.f to streamID in LUT */ + ls_pcie_lut_set_mapping(pcie_rc, index, bdf >> 8, streamid); + /* update msi-map in device tree */ + fdt_pcie_set_msi_map_entry_ls(blob, pcie_rc, bdf >> 8, streamid); + /* update iommu-map in device tree */ + 
fdt_pcie_set_iommu_map_entry_ls(blob, pcie_rc, bdf >> 8, streamid); + + return 0; +} + +struct extra_iommu_entry { + int action; + pci_dev_t bdf; + int num_vfs; + bool noari; +}; + +#define EXTRA_IOMMU_ENTRY_HOTPLUG 1 +#define EXTRA_IOMMU_ENTRY_VFS 2 + +static struct extra_iommu_entry *get_extra_iommu_ents(void *blob, + int nodeoffset, + phys_addr_t addr, + int *cnt) +{ + const char *s, *p, *tok; + struct extra_iommu_entry *entries; + int i = 0, b, d, f; + + /* + * Retrieve extra IOMMU configuration from env var or from device tree. + * Env var is given priority. + */ + s = env_get("pci_iommu_extra"); + if (!s) { + s = fdt_getprop(blob, nodeoffset, "pci-iommu-extra", NULL); + } else { + phys_addr_t pci_base; + char *endp; + + /* + * In env var case the config string has "pci@0x..." in + * addition. Parse this part and match it by address against + * the input pci controller's registers base address. + */ + tok = s; + p = strchrnul(s + 1, ','); + s = NULL; + do { + if (!strncmp(tok, "pci", 3)) { + pci_base = simple_strtoul(tok + 4, &endp, 0); + if (pci_base == addr) { + s = endp + 1; + break; + } + } + p = strchrnul(p + 1, ','); + tok = p + 1; + } while (*p); + } + + /* + * If no env var or device tree property found or pci register base + * address mismatches, bail out + */ + if (!s) + return NULL; + + /* + * In order to find how many action entries to allocate, count number + * of actions by interating through the pairs of bdfs and actions. + */ + *cnt = 0; + p = s; + while (*p && strncmp(p, "pci", 3)) { + if (*p == ',') + (*cnt)++; + p++; + } + if (!(*p)) + (*cnt)++; + + if (!(*cnt) || (*cnt) % 2) { + printf("ERROR: invalid or odd extra iommu token count %d\n", + *cnt); + return NULL; + } + *cnt = (*cnt) / 2; + + entries = malloc((*cnt) * sizeof(*entries)); + if (!entries) { + printf("ERROR: fail to allocate extra iommu entries\n"); + return NULL; + } + + /* + * Parse action entries one by one and store the information in the + * newly allocated actions array. + */ + p = s; + while (p) { + /* Extract BDF */ + b = simple_strtoul(p, (char **)&p, 0); p++; + d = simple_strtoul(p, (char **)&p, 0); p++; + f = simple_strtoul(p, (char **)&p, 0); p++; + entries[i].bdf = PCI_BDF(b, d, f); + + /* Parse action */ + if (!strncmp(p, "hp", 2)) { + /* Hot-plug entry */ + entries[i].action = EXTRA_IOMMU_ENTRY_HOTPLUG; + p += 2; + } else if (!strncmp(p, "vfs", 3) || + !strncmp(p, "noari_vfs", 9)) { + /* VFs or VFs with ARI disabled entry */ + entries[i].action = EXTRA_IOMMU_ENTRY_VFS; + entries[i].noari = !strncmp(p, "noari_vfs", 9); + + /* + * Parse and store total number of VFs to allocate + * IOMMU entries for. + */ + p = strchr(p, '='); + entries[i].num_vfs = simple_strtoul(p + 1, (char **)&p, + 0); + if (*p) + p++; + } else { + printf("ERROR: invalid action in extra iommu entry\n"); + free(entries); + + return NULL; + } + + if (!(*p) || !strncmp(p, "pci", 3)) + break; + + i++; + } + + return entries; +} + +static void get_vf_offset_and_stride(struct udevice *dev, int sriov_pos, + struct extra_iommu_entry *entry, + u16 *offset, u16 *stride) +{ + u16 tmp16; + u32 tmp32; + bool have_ari = false; + int pos; + struct udevice *pf_dev; + + dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_TOTAL_VF, &tmp16); + if (entry->num_vfs > tmp16) { + printf("WARN: requested no. 
of VFs %d exceeds total of %d\n", + entry->num_vfs, tmp16); + } + + /* + * The code below implements the VF Discovery recomandations specified + * in PCIe base spec "9.2.1.2 VF Discovery", quoted below: + * + * VF Discovery + * + * The First VF Offset and VF Stride fields in the SR-IOV extended + * capability are 16-bit Routing ID offsets. These offsets are used to + * compute the Routing IDs for the VFs with the following restrictions: + * - The value in NumVFs in a PF (Section 9.3.3.7) may affect the + * values in First VF Offset (Section 9.3.3.9) and VF Stride + * (Section 9.3.3.10) of that PF. + * - The value in ARI Capable Hierarchy (Section 9.3.3.3.5) in the + * lowest-numbered PF of the Device (for example PF0) may affect + * the values in First VF Offset and VF Stride in all PFs of the + * Device. + * - NumVFs of a PF may only be changed when VF Enable + * (Section 9.3.3.3.1) of that PF is Clear. + * - ARI Capable Hierarchy (Section 9.3.3.3.5) may only be changed + * when VF Enable is Clear in all PFs of a Device. + */ + + /* Clear VF enable for all PFs */ + device_foreach_child(pf_dev, dev->parent) { + dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, + &tmp16); + tmp16 &= ~PCI_SRIOV_CTRL_VFE; + dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, + tmp16); + } + + /* Obtain a reference to PF0 device */ + if (dm_pci_bus_find_bdf(PCI_BDF(PCI_BUS(entry->bdf), + PCI_DEV(entry->bdf), 0), &pf_dev)) { + printf("WARN: failed to get PF0\n"); + } + + if (entry->noari) + goto skip_ari; + + /* Check that connected downstream port supports ARI Forwarding */ + pos = dm_pci_find_capability(dev->parent, PCI_CAP_ID_EXP); + dm_pci_read_config32(dev->parent, pos + PCI_EXP_DEVCAP2, &tmp32); + if (!(tmp32 & PCI_EXP_DEVCAP2_ARI)) + goto skip_ari; + + /* Check that PF supports Alternate Routing ID */ + if (!dm_pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) + goto skip_ari; + + /* Set ARI Capable Hierarcy for PF0 */ + dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, &tmp16); + tmp16 |= PCI_SRIOV_CTRL_ARI; + dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, tmp16); + have_ari = true; + +skip_ari: + if (!have_ari) { + /* + * No ARI support or disabled so clear ARI Capable Hierarcy + * for PF0 + */ + dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, + &tmp16); + tmp16 &= ~PCI_SRIOV_CTRL_ARI; + dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, + tmp16); + } + + /* Set requested number of VFs */ + dm_pci_write_config16(dev, sriov_pos + PCI_SRIOV_NUM_VF, + entry->num_vfs); + + /* Read VF stride and offset with the configs just made */ + dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_VF_OFFSET, offset); + dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_VF_STRIDE, stride); + + if (have_ari) { + /* Reset to default ARI Capable Hierarcy bit for PF0 */ + dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, + &tmp16); + tmp16 &= ~PCI_SRIOV_CTRL_ARI; + dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, + tmp16); + } + /* Reset to default the number of VFs */ + dm_pci_write_config16(dev, sriov_pos + PCI_SRIOV_NUM_VF, 0); +} + +static int fdt_fixup_pci_vfs(void *blob, struct extra_iommu_entry *entry, + struct ls_pcie_rc *pcie_rc) +{ + struct udevice *dev, *bus; + u16 vf_offset, vf_stride; + int i, sriov_pos; + pci_dev_t bdf; + + if (dm_pci_bus_find_bdf(entry->bdf, &dev)) { + printf("ERROR: BDF %d.%d.%d not found\n", PCI_BUS(entry->bdf), + PCI_DEV(entry->bdf), PCI_FUNC(entry->bdf)); + return 0; + } + + sriov_pos = dm_pci_find_ext_capability(dev, 
PCI_EXT_CAP_ID_SRIOV); + if (!sriov_pos) { + printf("WARN: trying to set VFs on non-SRIOV dev\n"); + return 0; + } + + get_vf_offset_and_stride(dev, sriov_pos, entry, &vf_offset, &vf_stride); + + for (bus = dev; device_is_on_pci_bus(bus);) + bus = bus->parent; + + bdf = entry->bdf - PCI_BDF(dev_seq(bus), 0, 0) + (vf_offset << 8); + + for (i = 0; i < entry->num_vfs; i++) { + if (fdt_fixup_pcie_device_ls(blob, bdf, pcie_rc) < 0) + return -1; + bdf += vf_stride << 8; + } + + printf("Added %d iommu VF mappings for PF %d.%d.%d\n", + entry->num_vfs, PCI_BUS(entry->bdf), + PCI_DEV(entry->bdf), PCI_FUNC(entry->bdf)); + + return 0; +} + +static void fdt_fixup_pcie_ls(void *blob) +{ + struct udevice *dev, *bus; + struct ls_pcie_rc *pcie_rc; + pci_dev_t bdf; + struct extra_iommu_entry *entries; + int i, cnt, nodeoffset; + + + /* Scan all known buses */ + for (pci_find_first_device(&dev); + dev; + pci_find_next_device(&dev)) { + for (bus = dev; device_is_on_pci_bus(bus);) + bus = bus->parent; + + /* Only do the fixups for layerscape PCIe controllers */ + if (!device_is_compatible(bus, "fsl,ls-pcie") && + !device_is_compatible(bus, CONFIG_FSL_PCIE_COMPAT)) + continue; + + pcie_rc = dev_get_priv(bus); + + /* the DT fixup must be relative to the hose first_busno */ + bdf = dm_pci_get_bdf(dev) - PCI_BDF(dev_seq(bus), 0, 0); + + if (fdt_fixup_pcie_device_ls(blob, bdf, pcie_rc) < 0) + break; + } + + if (!IS_ENABLED(CONFIG_PCI_IOMMU_EXTRA_MAPPINGS)) + return; + + list_for_each_entry(pcie_rc, &ls_pcie_list, list) { + nodeoffset = fdt_pcie_get_nodeoffset(blob, pcie_rc); + if (nodeoffset < 0) { + printf("ERROR: couldn't find pci node\n"); + continue; + } + + entries = get_extra_iommu_ents(blob, nodeoffset, + pcie_rc->dbi_res.start, &cnt); + if (!entries) + continue; + + for (i = 0; i < cnt; i++) { + if (entries[i].action == EXTRA_IOMMU_ENTRY_HOTPLUG) { + bdf = entries[i].bdf; + printf("Added iommu map for hotplug %d.%d.%d\n", + PCI_BUS(bdf), PCI_DEV(bdf), + PCI_FUNC(bdf)); + if (fdt_fixup_pcie_device_ls(blob, bdf, + pcie_rc) < 0) { + free(entries); + return; + } + } else if (entries[i].action == EXTRA_IOMMU_ENTRY_VFS) { + if (fdt_fixup_pci_vfs(blob, &entries[i], + pcie_rc) < 0) { + free(entries); + return; + } + } else { + printf("Invalid action %d for BDF %d.%d.%d\n", + entries[i].action, + PCI_BUS(entries[i].bdf), + PCI_DEV(entries[i].bdf), + PCI_FUNC(entries[i].bdf)); + } + } + free(entries); + } +} +#endif + +static void ft_pcie_rc_fix(void *blob, struct ls_pcie_rc *pcie_rc) +{ + int off; + struct ls_pcie *pcie = pcie_rc->pcie; + + off = fdt_pcie_get_nodeoffset(blob, pcie_rc); + if (off < 0) + return; + + if (pcie_rc->enabled && pcie->mode == PCI_HEADER_TYPE_BRIDGE) + fdt_set_node_status(blob, off, FDT_STATUS_OKAY); + else + fdt_set_node_status(blob, off, FDT_STATUS_DISABLED); +} + +static void ft_pcie_ep_fix(void *blob, struct ls_pcie_rc *pcie_rc) +{ + int off; + struct ls_pcie *pcie = pcie_rc->pcie; + + off = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_EP_COMPAT, + pcie_rc->dbi_res.start); + if (off < 0) + return; + + if (pcie_rc->enabled && pcie->mode == PCI_HEADER_TYPE_NORMAL) + fdt_set_node_status(blob, off, FDT_STATUS_OKAY); + else + fdt_set_node_status(blob, off, FDT_STATUS_DISABLED); +} + +static void ft_pcie_ls_setup(void *blob, struct ls_pcie_rc *pcie_rc) +{ + ft_pcie_ep_fix(blob, pcie_rc); + ft_pcie_rc_fix(blob, pcie_rc); + + pcie_rc->stream_id_cur = 0; + pcie_rc->next_lut_index = 0; +} + +/* Fixup Kernel DT for PCIe */ +void ft_pci_setup_ls(void *blob, struct bd_info *bd) +{ + struct 
ls_pcie_rc *pcie_rc; + +#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2) + pcie_board_fix_fdt(blob); +#endif + + list_for_each_entry(pcie_rc, &ls_pcie_list, list) + ft_pcie_ls_setup(blob, pcie_rc); + +#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2) + next_stream_id = FSL_PEX_STREAM_ID_START; + fdt_fixup_pcie_ls(blob); +#endif +} + +#else /* !CONFIG_OF_BOARD_SETUP */ +void ft_pci_setup_ls(void *blob, struct bd_info *bd) +{ +} +#endif diff --git a/drivers/pci/pcie_layerscape_fixup_common.c b/drivers/pci/pcie_layerscape_fixup_common.c new file mode 100644 index 00000000000..f37e37f6b15 --- /dev/null +++ b/drivers/pci/pcie_layerscape_fixup_common.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2019-2021 NXP + * + * PCIe DT fixup for NXP Layerscape SoCs + * Author: Wasim Khan <wasim.khan@nxp.com> + * + */ + +#include <init.h> +#include <asm/arch/clock.h> +#include <asm/arch/soc.h> +#include <linux/errno.h> +#include <linux/libfdt.h> +#include <fdt_support.h> +#include "pcie_layerscape_fixup_common.h" + +extern int next_stream_id; + +void ft_pci_setup(void *blob, struct bd_info *bd) +{ +#if defined(CONFIG_PCIE_LAYERSCAPE_GEN4) + uint svr; + + svr = SVR_SOC_VER(get_svr()); + + if (svr == SVR_LX2160A && IS_SVR_REV(get_svr(), 1, 0)) + ft_pci_setup_ls_gen4(blob, bd); + else +#endif /* CONFIG_PCIE_LAYERSCAPE_GEN4 */ + ft_pci_setup_ls(blob, bd); +} + +#if defined(CONFIG_FSL_LAYERSCAPE) +static int lx2_board_fix_fdt(void *fdt) +{ + char *reg_name, *old_str, *new_str; + const char *reg_names; + int names_len, old_str_len, new_str_len, remaining_str_len; + struct str_map { + char *old_str; + char *new_str; + } reg_names_map[] = { + { "csr_axi_slave", "regs" }, + { "config_axi_slave", "config" } + }; + int off = -1, i; + const fdt32_t *prop; + u32 ob_wins, ib_wins; + + fdt_for_each_node_by_compatible(off, fdt, -1, "fsl,lx2160a-pcie") { + fdt_setprop(fdt, off, "compatible", "fsl,ls2088a-pcie", + strlen("fsl,ls2088a-pcie") + 1); + + reg_names = fdt_getprop(fdt, off, "reg-names", &names_len); + if (!reg_names) + continue; + reg_name = (char *)reg_names; + remaining_str_len = names_len - (reg_name - reg_names); + i = 0; + while ((i < ARRAY_SIZE(reg_names_map)) && remaining_str_len) { + old_str = reg_names_map[i].old_str; + new_str = reg_names_map[i].new_str; + old_str_len = strlen(old_str); + new_str_len = strlen(new_str); + if (memcmp(reg_name, old_str, old_str_len) == 0) { + /* first only leave required bytes for new_str + * and copy rest of the string after it + */ + memcpy(reg_name + new_str_len, + reg_name + old_str_len, + remaining_str_len - old_str_len); + + /* Now copy new_str */ + memcpy(reg_name, new_str, new_str_len); + names_len -= old_str_len; + names_len += new_str_len; + i++; + } + + reg_name = memchr(reg_name, '\0', remaining_str_len); + if (!reg_name) + break; + reg_name += 1; + + remaining_str_len = names_len - (reg_name - reg_names); + } + fdt_setprop(fdt, off, "reg-names", reg_names, names_len); + fdt_delprop(fdt, off, "apio-wins"); + fdt_delprop(fdt, off, "ppio-wins"); + } + + /* Fixup PCIe EP nodes */ + fdt_for_each_node_by_compatible(off, fdt, -1, "fsl,lx2160a-pcie-ep") { + fdt_setprop_string(fdt, off, "compatible", + "fsl,lx2160ar2-pcie-ep"); + prop = fdt_getprop(fdt, off, "apio-wins", NULL); + if (!prop) { + printf("%s: Failed to fixup PCIe EP node @0x%x\n", + __func__, off); + off = fdt_node_offset_by_compatible(fdt, off, + "fsl,lx2160a-pcie-ep"); + continue; + } + + ob_wins = fdt32_to_cpu(*prop); + ib_wins = (ob_wins == 256) ? 
24 : 8; + fdt_setprop_u32(fdt, off, "num-ib-windows", ib_wins); + fdt_setprop_u32(fdt, off, "num-ob-windows", ob_wins); + fdt_delprop(fdt, off, "apio-wins"); + } + + return 0; +} + +int pcie_board_fix_fdt(void *fdt) +{ + uint svr; + + svr = SVR_SOC_VER(get_svr()); + + if ((svr == SVR_LX2160A || svr == SVR_LX2162A || + svr == SVR_LX2120A || svr == SVR_LX2080A || + svr == SVR_LX2122A || svr == SVR_LX2082A) && + IS_SVR_REV(get_svr(), 2, 0)) + return lx2_board_fix_fdt(fdt); + + return 0; +} + +#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A) +/* returns the next available streamid for pcie, -errno if failed */ +int pcie_next_streamid(int currentid, int idx) +{ + if (currentid > FSL_PEX_STREAM_ID_END) + return -EINVAL; + + return currentid | ((idx + 1) << 11); +} +#else +/* returns the next available streamid for pcie, -errno if failed */ +int pcie_next_streamid(int currentid, int idx) +{ + if (next_stream_id > FSL_PEX_STREAM_ID_END) + return -EINVAL; + + return next_stream_id++; +} +#endif +#endif /* CONFIG_FSL_LAYERSCAPE */ diff --git a/drivers/pci/pcie_layerscape_fixup_common.h b/drivers/pci/pcie_layerscape_fixup_common.h new file mode 100644 index 00000000000..3255b7648c0 --- /dev/null +++ b/drivers/pci/pcie_layerscape_fixup_common.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2019-2020 NXP + * + * PCIe DT fixup for NXP Layerscape SoCs + * Author: Wasim Khan <wasim.khan@nxp.com> + * + */ +#ifndef _PCIE_LAYERSCAPE_FIXUP_COMMON_H_ +#define _PCIE_LAYERSCAPE_FIXUP_COMMON_H_ + +void ft_pci_setup_ls(void *blob, struct bd_info *bd); + +#ifdef CONFIG_PCIE_LAYERSCAPE_GEN4 +void ft_pci_setup_ls_gen4(void *blob, struct bd_info *bd); +#endif /* CONFIG_PCIE_LAYERSCAPE_GEN4 */ +int pcie_next_streamid(int currentid, int id); +int pcie_board_fix_fdt(void *fdt); + +#endif //_PCIE_LAYERSCAPE_FIXUP_COMMON_H_ diff --git a/drivers/pci/pcie_layerscape_gen4.c b/drivers/pci/pcie_layerscape_gen4.c new file mode 100644 index 00000000000..57dc91f2fae --- /dev/null +++ b/drivers/pci/pcie_layerscape_gen4.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: GPL-2.0+ OR X11 +/* + * Copyright 2018-2021 NXP + * + * PCIe Gen4 driver for NXP Layerscape SoCs + * Author: Hou Zhiqiang <Minder.Hou@gmail.com> + */ + +#include <config.h> +#include <log.h> +#include <asm/arch/fsl_serdes.h> +#include <pci.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <errno.h> +#include <malloc.h> +#include <dm.h> +#include <linux/sizes.h> + +#include "pcie_layerscape_gen4.h" + +DECLARE_GLOBAL_DATA_PTR; + +LIST_HEAD(ls_pcie_g4_list); + +static u64 bar_size[4] = { + PCIE_BAR0_SIZE, + PCIE_BAR1_SIZE, + PCIE_BAR2_SIZE, + PCIE_BAR4_SIZE +}; + +static int ls_pcie_g4_ltssm(struct ls_pcie_g4 *pcie) +{ + u32 state; + + state = pf_ctrl_readl(pcie, PCIE_LTSSM_STA) & LTSSM_STATE_MASK; + + return state; +} + +static int ls_pcie_g4_link_up(struct ls_pcie_g4 *pcie) +{ + int ltssm; + + ltssm = ls_pcie_g4_ltssm(pcie); + if (ltssm != LTSSM_PCIE_L0) + return 0; + + return 1; +} + +static void ls_pcie_g4_ep_enable_cfg(struct ls_pcie_g4 *pcie) +{ + ccsr_writel(pcie, GPEX_CFG_READY, PCIE_CONFIG_READY); +} + +static void ls_pcie_g4_cfg_set_target(struct ls_pcie_g4 *pcie, u32 target) +{ + ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target); + ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0); +} + +static int ls_pcie_g4_outbound_win_set(struct ls_pcie_g4 *pcie, int idx, + int type, u64 phys, u64 bus_addr, + pci_size_t size) +{ + u32 val; + u32 size_h, size_l; + + if (idx >= PAB_WINS_NUM) + return -EINVAL; + + 
size_h = upper_32_bits(~(size - 1)); + size_l = lower_32_bits(~(size - 1)); + + val = ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(idx)); + val &= ~((AXI_AMAP_CTRL_TYPE_MASK << AXI_AMAP_CTRL_TYPE_SHIFT) | + (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT) | + AXI_AMAP_CTRL_EN); + val |= ((type & AXI_AMAP_CTRL_TYPE_MASK) << AXI_AMAP_CTRL_TYPE_SHIFT) | + ((size_l >> AXI_AMAP_CTRL_SIZE_SHIFT) << + AXI_AMAP_CTRL_SIZE_SHIFT) | AXI_AMAP_CTRL_EN; + + ccsr_writel(pcie, PAB_AXI_AMAP_CTRL(idx), val); + + ccsr_writel(pcie, PAB_AXI_AMAP_AXI_WIN(idx), lower_32_bits(phys)); + ccsr_writel(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(idx), upper_32_bits(phys)); + ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr)); + ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr)); + ccsr_writel(pcie, PAB_EXT_AXI_AMAP_SIZE(idx), size_h); + + return 0; +} + +static int ls_pcie_g4_rc_inbound_win_set(struct ls_pcie_g4 *pcie, int idx, + int type, u64 phys, u64 bus_addr, + pci_size_t size) +{ + u32 val; + pci_size_t win_size = ~(size - 1); + + val = ccsr_readl(pcie, PAB_PEX_AMAP_CTRL(idx)); + + val &= ~(PEX_AMAP_CTRL_TYPE_MASK << PEX_AMAP_CTRL_TYPE_SHIFT); + val &= ~(PEX_AMAP_CTRL_EN_MASK << PEX_AMAP_CTRL_EN_SHIFT); + val = (val | (type << PEX_AMAP_CTRL_TYPE_SHIFT)); + val = (val | (1 << PEX_AMAP_CTRL_EN_SHIFT)); + + ccsr_writel(pcie, PAB_PEX_AMAP_CTRL(idx), + val | lower_32_bits(win_size)); + + ccsr_writel(pcie, PAB_EXT_PEX_AMAP_SIZE(idx), upper_32_bits(win_size)); + ccsr_writel(pcie, PAB_PEX_AMAP_AXI_WIN(idx), lower_32_bits(phys)); + ccsr_writel(pcie, PAB_EXT_PEX_AMAP_AXI_WIN(idx), upper_32_bits(phys)); + ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr)); + ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr)); + + return 0; +} + +static void ls_pcie_g4_dump_wins(struct ls_pcie_g4 *pcie, int wins) +{ + int i; + + for (i = 0; i < wins; i++) { + debug("APIO Win%d:\n", i); + debug("\tLOWER PHYS: 0x%08x\n", + ccsr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(i))); + debug("\tUPPER PHYS: 0x%08x\n", + ccsr_readl(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(i))); + debug("\tLOWER BUS: 0x%08x\n", + ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_L(i))); + debug("\tUPPER BUS: 0x%08x\n", + ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(i))); + debug("\tSIZE: 0x%08x\n", + ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)) & + (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT)); + debug("\tEXT_SIZE: 0x%08x\n", + ccsr_readl(pcie, PAB_EXT_AXI_AMAP_SIZE(i))); + debug("\tPARAM: 0x%08x\n", + ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(i))); + debug("\tCTRL: 0x%08x\n", + ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i))); + } +} + +static void ls_pcie_g4_setup_wins(struct ls_pcie_g4 *pcie) +{ + struct pci_region *io, *mem, *pref; + int idx = 1; + + /* INBOUND WIN */ + ls_pcie_g4_rc_inbound_win_set(pcie, 0, IB_TYPE_MEM_F, 0, 0, SIZE_1T); + + /* OUTBOUND WIN 0: CFG */ + ls_pcie_g4_outbound_win_set(pcie, 0, PAB_AXI_TYPE_CFG, + pcie->cfg_res.start, 0, + fdt_resource_size(&pcie->cfg_res)); + + pci_get_regions(pcie->bus, &io, &mem, &pref); + + if (io) + /* OUTBOUND WIN: IO */ + ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_IO, + io->phys_start, io->bus_start, + io->size); + + if (mem) + /* OUTBOUND WIN: MEM */ + ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM, + mem->phys_start, mem->bus_start, + mem->size); + + if (pref) + /* OUTBOUND WIN: perf MEM */ + ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM, + pref->phys_start, pref->bus_start, + pref->size); + + ls_pcie_g4_dump_wins(pcie, idx); +} + +/* Return 0 if the 
address is valid, -errno if not valid */ +static int ls_pcie_g4_addr_valid(struct ls_pcie_g4 *pcie, pci_dev_t bdf) +{ + struct udevice *bus = pcie->bus; + + if (pcie->mode == PCI_HEADER_TYPE_NORMAL) + return -ENODEV; + + if (!pcie->enabled) + return -ENXIO; + + if (PCI_BUS(bdf) < dev_seq(bus)) + return -EINVAL; + + if ((PCI_BUS(bdf) > dev_seq(bus)) && (!ls_pcie_g4_link_up(pcie))) + return -EINVAL; + + if (PCI_BUS(bdf) <= (dev_seq(bus) + 1) && (PCI_DEV(bdf) > 0)) + return -EINVAL; + + return 0; +} + +void *ls_pcie_g4_conf_address(struct ls_pcie_g4 *pcie, pci_dev_t bdf, + int offset) +{ + struct udevice *bus = pcie->bus; + u32 target; + + if (PCI_BUS(bdf) == dev_seq(bus)) { + if (offset < INDIRECT_ADDR_BNDRY) { + ccsr_set_page(pcie, 0); + return pcie->ccsr + offset; + } + + ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset)); + return pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset); + } + + target = PAB_TARGET_BUS(PCI_BUS(bdf) - dev_seq(bus)) | + PAB_TARGET_DEV(PCI_DEV(bdf)) | + PAB_TARGET_FUNC(PCI_FUNC(bdf)); + + ls_pcie_g4_cfg_set_target(pcie, target); + + return pcie->cfg + offset; +} + +static int ls_pcie_g4_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct ls_pcie_g4 *pcie = dev_get_priv(bus); + void *address; + int ret = 0; + + if (ls_pcie_g4_addr_valid(pcie, bdf)) { + *valuep = pci_get_ff(size); + return 0; + } + + address = ls_pcie_g4_conf_address(pcie, bdf, offset); + + switch (size) { + case PCI_SIZE_8: + *valuep = readb(address); + break; + case PCI_SIZE_16: + *valuep = readw(address); + break; + case PCI_SIZE_32: + *valuep = readl(address); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ls_pcie_g4_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct ls_pcie_g4 *pcie = dev_get_priv(bus); + void *address; + + if (ls_pcie_g4_addr_valid(pcie, bdf)) + return 0; + + address = ls_pcie_g4_conf_address(pcie, bdf, offset); + + switch (size) { + case PCI_SIZE_8: + writeb(value, address); + return 0; + case PCI_SIZE_16: + writew(value, address); + return 0; + case PCI_SIZE_32: + writel(value, address); + return 0; + default: + return -EINVAL; + } +} + +static void ls_pcie_g4_setup_ctrl(struct ls_pcie_g4 *pcie) +{ + u32 val; + + /* Fix class code */ + val = ccsr_readl(pcie, GPEX_CLASSCODE); + val &= ~(GPEX_CLASSCODE_MASK << GPEX_CLASSCODE_SHIFT); + val |= PCI_CLASS_BRIDGE_PCI << GPEX_CLASSCODE_SHIFT; + ccsr_writel(pcie, GPEX_CLASSCODE, val); + + /* Enable APIO and Memory/IO/CFG Wins */ + val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0)); + val |= APIO_EN | MEM_WIN_EN | IO_WIN_EN | CFG_WIN_EN; + ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val); + + ls_pcie_g4_setup_wins(pcie); +} + +static void ls_pcie_g4_ep_inbound_win_set(struct ls_pcie_g4 *pcie, int pf, + int bar, u64 phys) +{ + u32 val; + + /* PF BAR1 is for MSI-X and only need to enable */ + if (bar == 1) { + ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), BAR_AMAP_EN); + return; + } + + val = upper_32_bits(phys); + ccsr_writel(pcie, PAB_EXT_PEX_BAR_AMAP(pf, bar), val); + val = lower_32_bits(phys) | BAR_AMAP_EN; + ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), val); +} + +static void ls_pcie_g4_ep_setup_wins(struct ls_pcie_g4 *pcie, int pf) +{ + u64 phys; + int bar; + u32 val; + + if ((!pcie->sriov_support && pf > LS_G4_PF0) || pf > LS_G4_PF1) + return; + + phys = CFG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR_SIZE * 4 * pf; + for (bar = 0; bar < PF_BAR_NUM; bar++) { + ls_pcie_g4_ep_inbound_win_set(pcie, pf, 
bar, phys); + phys += PCIE_BAR_SIZE; + } + + /* OUTBOUND: map MEM */ + ls_pcie_g4_outbound_win_set(pcie, pf, PAB_AXI_TYPE_MEM, + pcie->cfg_res.start + + CFG_SYS_PCI_MEMORY_SIZE * pf, 0x0, + CFG_SYS_PCI_MEMORY_SIZE); + + val = ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf)); + val &= ~FUNC_NUM_PCIE_MASK; + val |= pf; + ccsr_writel(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf), val); +} + +static void ls_pcie_g4_ep_enable_bar(struct ls_pcie_g4 *pcie, int pf, + int bar, bool vf_bar, bool enable) +{ + u32 val; + u32 bar_pos = BAR_POS(bar, pf, vf_bar); + + val = ccsr_readl(pcie, GPEX_BAR_ENABLE); + if (enable) + val |= 1 << bar_pos; + else + val &= ~(1 << bar_pos); + ccsr_writel(pcie, GPEX_BAR_ENABLE, val); +} + +static void ls_pcie_g4_ep_set_bar_size(struct ls_pcie_g4 *pcie, int pf, + int bar, bool vf_bar, u64 size) +{ + u32 bar_pos = BAR_POS(bar, pf, vf_bar); + u32 mask_l = lower_32_bits(~(size - 1)); + u32 mask_h = upper_32_bits(~(size - 1)); + + ccsr_writel(pcie, GPEX_BAR_SELECT, bar_pos); + ccsr_writel(pcie, GPEX_BAR_SIZE_LDW, mask_l); + ccsr_writel(pcie, GPEX_BAR_SIZE_UDW, mask_h); +} + +static void ls_pcie_g4_ep_setup_bar(struct ls_pcie_g4 *pcie, int pf, + int bar, bool vf_bar, u64 size) +{ + bool en = size ? true : false; + + ls_pcie_g4_ep_enable_bar(pcie, pf, bar, vf_bar, en); + ls_pcie_g4_ep_set_bar_size(pcie, pf, bar, vf_bar, size); +} + +static void ls_pcie_g4_ep_setup_bars(struct ls_pcie_g4 *pcie, int pf) +{ + int bar; + + /* Setup PF BARs */ + for (bar = 0; bar < PF_BAR_NUM; bar++) + ls_pcie_g4_ep_setup_bar(pcie, pf, bar, false, bar_size[bar]); + + if (!pcie->sriov_support) + return; + + /* Setup VF BARs */ + for (bar = 0; bar < VF_BAR_NUM; bar++) + ls_pcie_g4_ep_setup_bar(pcie, pf, bar, true, bar_size[bar]); +} + +static void ls_pcie_g4_set_sriov(struct ls_pcie_g4 *pcie, int pf) +{ + unsigned int val; + + val = ccsr_readl(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf)); + val &= ~(TTL_VF_MASK << TTL_VF_SHIFT); + val |= PCIE_VF_NUM << TTL_VF_SHIFT; + val &= ~(INI_VF_MASK << INI_VF_SHIFT); + val |= PCIE_VF_NUM << INI_VF_SHIFT; + ccsr_writel(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf), val); + + val = ccsr_readl(pcie, PCIE_SRIOV_VF_OFFSET_STRIDE); + val += PCIE_VF_NUM * pf - pf; + ccsr_writel(pcie, GPEX_SRIOV_VF_OFFSET_STRIDE(pf), val); +} + +static void ls_pcie_g4_setup_ep(struct ls_pcie_g4 *pcie) +{ + u32 pf, sriov; + u32 val; + int i; + + /* Enable APIO and Memory Win */ + val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0)); + val |= APIO_EN | MEM_WIN_EN; + ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val); + + sriov = ccsr_readl(pcie, PCIE_SRIOV_CAPABILITY); + if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) + pcie->sriov_support = 1; + + pf = pcie->sriov_support ? 
PCIE_PF_NUM : 1; + + for (i = 0; i < pf; i++) { + ls_pcie_g4_ep_setup_bars(pcie, i); + ls_pcie_g4_ep_setup_wins(pcie, i); + if (pcie->sriov_support) + ls_pcie_g4_set_sriov(pcie, i); + } + + ls_pcie_g4_ep_enable_cfg(pcie); + ls_pcie_g4_dump_wins(pcie, pf); +} + +static int ls_pcie_g4_probe(struct udevice *dev) +{ + struct ls_pcie_g4 *pcie = dev_get_priv(dev); + const void *fdt = gd->fdt_blob; + int node = dev_of_offset(dev); + u32 link_ctrl_sta; + u32 val; + int ret; + fdt_size_t cfg_size; + + pcie->bus = dev; + + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "ccsr", &pcie->ccsr_res); + if (ret) { + printf("ls-pcie-g4: resource \"ccsr\" not found\n"); + return ret; + } + + pcie->idx = (pcie->ccsr_res.start - PCIE_SYS_BASE_ADDR) / + PCIE_CCSR_SIZE; + + list_add(&pcie->list, &ls_pcie_g4_list); + + pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx)); + if (!pcie->enabled) { + printf("PCIe%d: %s disabled\n", PCIE_SRDS_PRTCL(pcie->idx), + dev->name); + return 0; + } + + pcie->ccsr = map_physmem(pcie->ccsr_res.start, + fdt_resource_size(&pcie->ccsr_res), + MAP_NOCACHE); + + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "config", &pcie->cfg_res); + if (ret) { + printf("%s: resource \"config\" not found\n", dev->name); + return ret; + } + + cfg_size = fdt_resource_size(&pcie->cfg_res); + if (cfg_size < SZ_4K) { + printf("PCIe%d: %s Invalid size(0x%llx) for resource \"config\",expected minimum 0x%x\n", + PCIE_SRDS_PRTCL(pcie->idx), dev->name, cfg_size, SZ_4K); + return 0; + } + + pcie->cfg = map_physmem(pcie->cfg_res.start, + fdt_resource_size(&pcie->cfg_res), + MAP_NOCACHE); + + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "lut", &pcie->lut_res); + if (ret) { + printf("ls-pcie-g4: resource \"lut\" not found\n"); + return ret; + } + + pcie->lut = map_physmem(pcie->lut_res.start, + fdt_resource_size(&pcie->lut_res), + MAP_NOCACHE); + + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "pf_ctrl", &pcie->pf_ctrl_res); + if (ret) { + printf("ls-pcie-g4: resource \"pf_ctrl\" not found\n"); + return ret; + } + + pcie->pf_ctrl = map_physmem(pcie->pf_ctrl_res.start, + fdt_resource_size(&pcie->pf_ctrl_res), + MAP_NOCACHE); + + pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian"); + + debug("%s ccsr:%lx, cfg:0x%lx, big-endian:%d\n", + dev->name, (unsigned long)pcie->ccsr, (unsigned long)pcie->cfg, + pcie->big_endian); + + pcie->mode = readb(pcie->ccsr + PCI_HEADER_TYPE) & 0x7f; + + if (pcie->mode == PCI_HEADER_TYPE_NORMAL) { + printf("PCIe%u: %s %s", PCIE_SRDS_PRTCL(pcie->idx), dev->name, + "Endpoint"); + ls_pcie_g4_setup_ep(pcie); + } else { + printf("PCIe%u: %s %s", PCIE_SRDS_PRTCL(pcie->idx), dev->name, + "Root Complex"); + ls_pcie_g4_setup_ctrl(pcie); + } + + /* Enable Amba & PEX PIO */ + val = ccsr_readl(pcie, PAB_CTRL); + val |= PAB_CTRL_APIO_EN | PAB_CTRL_PPIO_EN; + ccsr_writel(pcie, PAB_CTRL, val); + + val = ccsr_readl(pcie, PAB_PEX_PIO_CTRL(0)); + val |= PPIO_EN; + ccsr_writel(pcie, PAB_PEX_PIO_CTRL(0), val); + + if (!ls_pcie_g4_link_up(pcie)) { + /* Let the user know there's no PCIe link */ + printf(": no link\n"); + return 0; + } + + /* Print the negotiated PCIe link width */ + link_ctrl_sta = ccsr_readl(pcie, PCIE_LINK_CTRL_STA); + printf(": x%d gen%d\n", + (link_ctrl_sta >> PCIE_LINK_WIDTH_SHIFT & PCIE_LINK_WIDTH_MASK), + (link_ctrl_sta >> PCIE_LINK_SPEED_SHIFT) & PCIE_LINK_SPEED_MASK); + + return 0; +} + +static const struct dm_pci_ops ls_pcie_g4_ops = { + .read_config = ls_pcie_g4_read_config, + .write_config = 
ls_pcie_g4_write_config, +}; + +static const struct udevice_id ls_pcie_g4_ids[] = { + { .compatible = "fsl,lx2160a-pcie" }, + { } +}; + +U_BOOT_DRIVER(pcie_layerscape_gen4) = { + .name = "pcie_layerscape_gen4", + .id = UCLASS_PCI, + .of_match = ls_pcie_g4_ids, + .ops = &ls_pcie_g4_ops, + .probe = ls_pcie_g4_probe, + .priv_auto = sizeof(struct ls_pcie_g4), +}; diff --git a/drivers/pci/pcie_layerscape_gen4.h b/drivers/pci/pcie_layerscape_gen4.h new file mode 100644 index 00000000000..805c23a7da0 --- /dev/null +++ b/drivers/pci/pcie_layerscape_gen4.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2018-2019 NXP + * + * PCIe Gen4 driver for NXP Layerscape SoCs + * Author: Hou Zhiqiang <Minder.Hou@gmail.com> + */ + +#ifndef _PCIE_LAYERSCAPE_GEN4_H_ +#define _PCIE_LAYERSCAPE_GEN4_H_ +#include <pci.h> +#include <linux/bitops.h> + +#ifndef CFG_SYS_PCI_MEMORY_SIZE +#define CFG_SYS_PCI_MEMORY_SIZE (4 * 1024 * 1024 * 1024ULL) +#endif + +#ifndef CFG_SYS_PCI_EP_MEMORY_BASE +#define CFG_SYS_PCI_EP_MEMORY_BASE CONFIG_SYS_LOAD_ADDR +#endif + +#define PCIE_PF_NUM 2 +#define PCIE_VF_NUM 32 + +#define LS_G4_PF0 0 +#define LS_G4_PF1 1 +#define PF_BAR_NUM 4 +#define VF_BAR_NUM 4 +#define PCIE_BAR_SIZE (8 * 1024) /* 8K */ +#define PCIE_BAR0_SIZE PCIE_BAR_SIZE +#define PCIE_BAR1_SIZE PCIE_BAR_SIZE +#define PCIE_BAR2_SIZE PCIE_BAR_SIZE +#define PCIE_BAR4_SIZE PCIE_BAR_SIZE +#define SIZE_1T (1024 * 1024 * 1024 * 1024ULL) + +/* GPEX CSR */ +#define GPEX_CLASSCODE 0x474 +#define GPEX_CLASSCODE_SHIFT 16 +#define GPEX_CLASSCODE_MASK 0xffff + +#define GPEX_CFG_READY 0x4b0 +#define PCIE_CONFIG_READY BIT(0) + +#define GPEX_BAR_ENABLE 0x4d4 +#define GPEX_BAR_SIZE_LDW 0x4d8 +#define GPEX_BAR_SIZE_UDW 0x4dC +#define GPEX_BAR_SELECT 0x4e0 + +#define BAR_POS(bar, pf, vf_bar) \ + ((bar) + (pf) * PF_BAR_NUM + (vf_bar) * PCIE_PF_NUM * PF_BAR_NUM) + +#define GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf) (0x644 + (pf) * 4) +#define TTL_VF_MASK 0xffff +#define TTL_VF_SHIFT 16 +#define INI_VF_MASK 0xffff +#define INI_VF_SHIFT 0 +#define GPEX_SRIOV_VF_OFFSET_STRIDE(pf) (0x704 + (pf) * 4) + +/* PAB CSR */ +#define PAB_CTRL 0x808 +#define PAB_CTRL_APIO_EN BIT(0) +#define PAB_CTRL_PPIO_EN BIT(1) +#define PAB_CTRL_MAX_BRST_LEN_SHIFT 4 +#define PAB_CTRL_MAX_BRST_LEN_MASK 0x3 +#define PAB_CTRL_PAGE_SEL_SHIFT 13 +#define PAB_CTRL_PAGE_SEL_MASK 0x3f +#define PAB_CTRL_FUNC_SEL_SHIFT 19 +#define PAB_CTRL_FUNC_SEL_MASK 0x1ff + +#define PAB_RST_CTRL 0x820 +#define PAB_BR_STAT 0x80c + +/* AXI PIO Engines */ +#define PAB_AXI_PIO_CTRL(idx) (0x840 + 0x10 * (idx)) +#define APIO_EN BIT(0) +#define MEM_WIN_EN BIT(1) +#define IO_WIN_EN BIT(2) +#define CFG_WIN_EN BIT(3) +#define PAB_AXI_PIO_STAT(idx) (0x844 + 0x10 * (idx)) +#define PAB_AXI_PIO_SL_CMD_STAT(idx) (0x848 + 0x10 * (idx)) +#define PAB_AXI_PIO_SL_ADDR_STAT(idx) (0x84c + 0x10 * (idx)) +#define PAB_AXI_PIO_SL_EXT_ADDR_STAT(idx) (0xb8a0 + 0x4 * (idx)) + +/* PEX PIO Engines */ +#define PAB_PEX_PIO_CTRL(idx) (0x8c0 + 0x10 * (idx)) +#define PPIO_EN BIT(0) +#define PAB_PEX_PIO_STAT(idx) (0x8c4 + 0x10 * (idx)) +#define PAB_PEX_PIO_MT_STAT(idx) (0x8c8 + 0x10 * (idx)) + +#define INDIRECT_ADDR_BNDRY 0xc00 +#define PAGE_IDX_SHIFT 10 +#define PAGE_ADDR_MASK 0x3ff + +#define OFFSET_TO_PAGE_IDX(off) \ + (((off) >> PAGE_IDX_SHIFT) & PAB_CTRL_PAGE_SEL_MASK) + +#define OFFSET_TO_PAGE_ADDR(off) \ + (((off) & PAGE_ADDR_MASK) | INDIRECT_ADDR_BNDRY) + +/* APIO WINs */ +#define PAB_AXI_AMAP_CTRL(idx) (0xba0 + 0x10 * (idx)) +#define PAB_EXT_AXI_AMAP_SIZE(idx) (0xbaf0 + 0x4 * (idx)) +#define 
PAB_AXI_AMAP_AXI_WIN(idx) (0xba4 + 0x10 * (idx)) +#define PAB_EXT_AXI_AMAP_AXI_WIN(idx) (0x80a0 + 0x4 * (idx)) +#define PAB_AXI_AMAP_PEX_WIN_L(idx) (0xba8 + 0x10 * (idx)) +#define PAB_AXI_AMAP_PEX_WIN_H(idx) (0xbac + 0x10 * (idx)) +#define PAB_AXI_AMAP_PCI_HDR_PARAM(idx) (0x5ba0 + 0x4 * (idx)) +#define FUNC_NUM_PCIE_MASK GENMASK(7, 0) + +#define AXI_AMAP_CTRL_EN BIT(0) +#define AXI_AMAP_CTRL_TYPE_SHIFT 1 +#define AXI_AMAP_CTRL_TYPE_MASK 0x3 +#define AXI_AMAP_CTRL_SIZE_SHIFT 10 +#define AXI_AMAP_CTRL_SIZE_MASK 0x3fffff + +#define PAB_TARGET_BUS(x) (((x) & 0xff) << 24) +#define PAB_TARGET_DEV(x) (((x) & 0x1f) << 19) +#define PAB_TARGET_FUNC(x) (((x) & 0x7) << 16) + +#define PAB_AXI_TYPE_CFG 0x00 +#define PAB_AXI_TYPE_IO 0x01 +#define PAB_AXI_TYPE_MEM 0x02 +#define PAB_AXI_TYPE_ATOM 0x03 + +#define PAB_WINS_NUM 256 + +/* PPIO WINs RC mode */ +#define PAB_PEX_AMAP_CTRL(idx) (0x4ba0 + 0x10 * (idx)) +#define PAB_EXT_PEX_AMAP_SIZE(idx) (0xbef0 + 0x04 * (idx)) +#define PAB_PEX_AMAP_AXI_WIN(idx) (0x4ba4 + 0x10 * (idx)) +#define PAB_EXT_PEX_AMAP_AXI_WIN(idx) (0xb4a0 + 0x04 * (idx)) +#define PAB_PEX_AMAP_PEX_WIN_L(idx) (0x4ba8 + 0x10 * (idx)) +#define PAB_PEX_AMAP_PEX_WIN_H(idx) (0x4bac + 0x10 * (idx)) + +#define IB_TYPE_MEM_F 0x2 +#define IB_TYPE_MEM_NF 0x3 + +#define PEX_AMAP_CTRL_TYPE_SHIFT 0x1 +#define PEX_AMAP_CTRL_EN_SHIFT 0x0 +#define PEX_AMAP_CTRL_TYPE_MASK 0x3 +#define PEX_AMAP_CTRL_EN_MASK 0x1 + +/* PPIO WINs EP mode */ +#define PAB_PEX_BAR_AMAP(pf, bar) \ + (0x1ba0 + 0x20 * (pf) + 4 * (bar)) +#define BAR_AMAP_EN BIT(0) +#define PAB_EXT_PEX_BAR_AMAP(pf, bar) \ + (0x84a0 + 0x20 * (pf) + 4 * (bar)) + +/* CCSR registers */ +#define PCIE_LINK_CTRL_STA 0x5c +#define PCIE_LINK_SPEED_SHIFT 16 +#define PCIE_LINK_SPEED_MASK 0x0f +#define PCIE_LINK_WIDTH_SHIFT 20 +#define PCIE_LINK_WIDTH_MASK 0x3f +#define PCIE_SRIOV_CAPABILITY 0x2a0 +#define PCIE_SRIOV_VF_OFFSET_STRIDE 0x2b4 + +/* LUT registers */ +#define PCIE_LUT_UDR(n) (0x800 + (n) * 8) +#define PCIE_LUT_LDR(n) (0x804 + (n) * 8) +#define PCIE_LUT_ENABLE BIT(31) +#define PCIE_LUT_ENTRY_COUNT 32 + +/* PF control registers */ +#define PCIE_LTSSM_STA 0x7fc +#define LTSSM_STATE_MASK 0x7f +#define LTSSM_PCIE_L0 0x2d /* L0 state */ + +#define PCIE_SRDS_PRTCL(idx) (PCIE1 + (idx)) +#define PCIE_SYS_BASE_ADDR 0x3400000 +#define PCIE_CCSR_SIZE 0x0100000 + +struct ls_pcie_g4 { + int idx; + struct list_head list; + struct udevice *bus; + struct fdt_resource ccsr_res; + struct fdt_resource cfg_res; + struct fdt_resource lut_res; + struct fdt_resource pf_ctrl_res; + void __iomem *ccsr; + void __iomem *cfg; + void __iomem *lut; + void __iomem *pf_ctrl; + bool big_endian; + bool enabled; + int next_lut_index; + struct pci_controller hose; + int stream_id_cur; + int mode; + int sriov_support; +}; + +extern struct list_head ls_pcie_g4_list; + +static inline void lut_writel(struct ls_pcie_g4 *pcie, unsigned int value, + unsigned int offset) +{ + if (pcie->big_endian) + out_be32(pcie->lut + offset, value); + else + out_le32(pcie->lut + offset, value); +} + +static inline u32 lut_readl(struct ls_pcie_g4 *pcie, unsigned int offset) +{ + if (pcie->big_endian) + return in_be32(pcie->lut + offset); + else + return in_le32(pcie->lut + offset); +} + +static inline void ccsr_set_page(struct ls_pcie_g4 *pcie, u8 pg_idx) +{ + u32 val; + + val = in_le32(pcie->ccsr + PAB_CTRL); + val &= ~(PAB_CTRL_PAGE_SEL_MASK << PAB_CTRL_PAGE_SEL_SHIFT); + val |= (pg_idx & PAB_CTRL_PAGE_SEL_MASK) << PAB_CTRL_PAGE_SEL_SHIFT; + + out_le32(pcie->ccsr + PAB_CTRL, val); +} + +static inline unsigned 
int ccsr_readl(struct ls_pcie_g4 *pcie, u32 offset) +{ + if (offset < INDIRECT_ADDR_BNDRY) { + ccsr_set_page(pcie, 0); + return in_le32(pcie->ccsr + offset); + } + + ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset)); + return in_le32(pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset)); +} + +static inline void ccsr_writel(struct ls_pcie_g4 *pcie, u32 offset, u32 value) +{ + if (offset < INDIRECT_ADDR_BNDRY) { + ccsr_set_page(pcie, 0); + out_le32(pcie->ccsr + offset, value); + } else { + ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset)); + out_le32(pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset), value); + } +} + +static inline unsigned int pf_ctrl_readl(struct ls_pcie_g4 *pcie, u32 offset) +{ + if (pcie->big_endian) + return in_be32(pcie->pf_ctrl + offset); + else + return in_le32(pcie->pf_ctrl + offset); +} + +static inline void pf_ctrl_writel(struct ls_pcie_g4 *pcie, u32 offset, + u32 value) +{ + if (pcie->big_endian) + out_be32(pcie->pf_ctrl + offset, value); + else + out_le32(pcie->pf_ctrl + offset, value); +} + +#endif /* _PCIE_LAYERSCAPE_GEN4_H_ */ diff --git a/drivers/pci/pcie_layerscape_gen4_fixup.c b/drivers/pci/pcie_layerscape_gen4_fixup.c new file mode 100644 index 00000000000..60c4338bcdb --- /dev/null +++ b/drivers/pci/pcie_layerscape_gen4_fixup.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0+ OR X11 +/* + * Copyright 2018-2021 NXP + * + * PCIe Gen4 driver for NXP Layerscape SoCs + * Author: Hou Zhiqiang <Minder.Hou@gmail.com> + * + */ + +#include <dm.h> +#include <log.h> +#include <pci.h> +#include <asm/arch/fsl_serdes.h> +#include <asm/io.h> +#include <errno.h> +#ifdef CONFIG_OF_BOARD_SETUP +#include <linux/libfdt.h> +#include <fdt_support.h> +#ifdef CONFIG_ARM +#include <asm/arch/clock.h> +#endif +#include "pcie_layerscape_gen4.h" +#include "pcie_layerscape_fixup_common.h" + +#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2) +/* + * Return next available LUT index. + */ +static int ls_pcie_g4_next_lut_index(struct ls_pcie_g4 *pcie) +{ + if (pcie->next_lut_index < PCIE_LUT_ENTRY_COUNT) + return pcie->next_lut_index++; + + return -ENOSPC; /* LUT is full */ +} + +/* + * Program a single LUT entry + */ +static void ls_pcie_g4_lut_set_mapping(struct ls_pcie_g4 *pcie, int index, + u32 devid, u32 streamid) +{ + /* leave mask as all zeroes, want to match all bits */ + lut_writel(pcie, devid << 16, PCIE_LUT_UDR(index)); + lut_writel(pcie, streamid | PCIE_LUT_ENABLE, PCIE_LUT_LDR(index)); +} + +/* + * An msi-map is a property to be added to the pci controller + * node. 
It is a table, where each entry consists of 4 fields
+ * e.g.:
+ *
+ * msi-map = <[devid] [phandle-to-msi-ctrl] [stream-id] [count]
+ * [devid] [phandle-to-msi-ctrl] [stream-id] [count]>;
+ */
+static void fdt_pcie_set_msi_map_entry_ls_gen4(void *blob,
+ struct ls_pcie_g4 *pcie,
+ u32 devid, u32 streamid)
+{
+ u32 *prop;
+ u32 phandle;
+ int nodeoff;
+
+#ifdef CONFIG_FSL_PCIE_COMPAT
+ nodeoff = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_COMPAT,
+ pcie->ccsr_res.start);
+#else
+#error "No CONFIG_FSL_PCIE_COMPAT defined"
+#endif
+ if (nodeoff < 0) {
+ debug("%s: ERROR: failed to find pcie compatible\n", __func__);
+ return;
+ }
+
+ /* get phandle to MSI controller */
+ prop = (u32 *)fdt_getprop(blob, nodeoff, "msi-parent", 0);
+ if (!prop) {
+ debug("\n%s: ERROR: missing msi-parent: PCIe%d\n",
+ __func__, pcie->idx);
+ return;
+ }
+ phandle = fdt32_to_cpu(*prop);
+
+ /* set one msi-map row */
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", devid);
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", phandle);
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", streamid);
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", 1);
+}
+
+/*
+ * An iommu-map is a property to be added to the pci controller
+ * node. It is a table, where each entry consists of 4 fields
+ * e.g.:
+ *
+ * iommu-map = <[devid] [phandle-to-iommu-ctrl] [stream-id] [count]
+ * [devid] [phandle-to-iommu-ctrl] [stream-id] [count]>;
+ */
+static void fdt_pcie_set_iommu_map_entry_ls_gen4(void *blob,
+ struct ls_pcie_g4 *pcie,
+ u32 devid, u32 streamid)
+{
+ u32 *prop;
+ u32 iommu_map[4];
+ int nodeoff;
+ int lenp;
+
+#ifdef CONFIG_FSL_PCIE_COMPAT
+ nodeoff = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_COMPAT,
+ pcie->ccsr_res.start);
+#else
+#error "No CONFIG_FSL_PCIE_COMPAT defined"
+#endif
+ if (nodeoff < 0) {
+ debug("%s: ERROR: failed to find pcie compatible\n", __func__);
+ return;
+ }
+
+ /* get phandle to iommu controller */
+ prop = fdt_getprop_w(blob, nodeoff, "iommu-map", &lenp);
+ if (!prop) {
+ debug("\n%s: ERROR: missing iommu-map: PCIe%d\n",
+ __func__, pcie->idx);
+ return;
+ }
+
+ /* set iommu-map row */
+ iommu_map[0] = cpu_to_fdt32(devid);
+ iommu_map[1] = *++prop;
+ iommu_map[2] = cpu_to_fdt32(streamid);
+ iommu_map[3] = cpu_to_fdt32(1);
+
+ if (devid == 0)
+ fdt_setprop_inplace(blob, nodeoff, "iommu-map", iommu_map, 16);
+ else
+ fdt_appendprop(blob, nodeoff, "iommu-map", iommu_map, 16);
+}
+
+static void fdt_fixup_pcie_ls_gen4(void *blob)
+{
+ struct udevice *dev, *bus;
+ struct ls_pcie_g4 *pcie;
+ int streamid;
+ int index;
+ pci_dev_t bdf;
+
+ /* Scan all known buses */
+ for (pci_find_first_device(&dev); dev; pci_find_next_device(&dev)) {
+ for (bus = dev; device_is_on_pci_bus(bus);)
+ bus = bus->parent;
+ pcie = dev_get_priv(bus);
+
+ streamid = pcie_next_streamid(pcie->stream_id_cur, pcie->idx);
+ if (streamid < 0) {
+ debug("ERROR: no stream ids free\n");
+ continue;
+ } else {
+ pcie->stream_id_cur++;
+ }
+
+ index = ls_pcie_g4_next_lut_index(pcie);
+ if (index < 0) {
+ debug("ERROR: no LUT indexes free\n");
+ continue;
+ }
+
+ /* the DT fixup must be relative to the hose first_busno */
+ bdf = dm_pci_get_bdf(dev) - PCI_BDF(dev_seq(bus), 0, 0);
+ /* map PCI b.d.f to streamID in LUT */
+ ls_pcie_g4_lut_set_mapping(pcie, index, bdf >> 8, streamid);
+ /* update msi-map in device tree */
+ fdt_pcie_set_msi_map_entry_ls_gen4(blob, pcie, bdf >> 8,
+ streamid);
+ /* update iommu-map in device tree */
+ fdt_pcie_set_iommu_map_entry_ls_gen4(blob, pcie, bdf >> 8,
+ streamid);
+ }
+}
+#endif
+
+static void ft_pcie_ep_layerscape_gen4_fix(void *blob, struct ls_pcie_g4 *pcie)
+{
+ int off;
+
+ off = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_EP_COMPAT,
+ pcie->ccsr_res.start);
+
+ if (off < 0) {
+ debug("%s: ERROR: failed to find pcie compatible\n",
+ __func__);
+ return;
+ }
+
+ if (pcie->enabled && pcie->mode == PCI_HEADER_TYPE_NORMAL)
+ fdt_set_node_status(blob, off, FDT_STATUS_OKAY);
+ else
+ fdt_set_node_status(blob, off, FDT_STATUS_DISABLED);
+}
+
+static void ft_pcie_rc_layerscape_gen4_fix(void *blob, struct ls_pcie_g4 *pcie)
+{
+ int off;
+
+#ifdef CONFIG_FSL_PCIE_COMPAT
+ off = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_COMPAT,
+ pcie->ccsr_res.start);
+#else
+#error "No CONFIG_FSL_PCIE_COMPAT defined"
+#endif
+ if (off < 0) {
+ debug("%s: ERROR: failed to find pcie compatible\n", __func__);
+ return;
+ }
+
+ if (pcie->enabled && pcie->mode == PCI_HEADER_TYPE_BRIDGE)
+ fdt_set_node_status(blob, off, FDT_STATUS_OKAY);
+ else
+ fdt_set_node_status(blob, off, FDT_STATUS_DISABLED);
+}
+
+static void ft_pcie_layerscape_gen4_setup(void *blob, struct ls_pcie_g4 *pcie)
+{
+ ft_pcie_rc_layerscape_gen4_fix(blob, pcie);
+ ft_pcie_ep_layerscape_gen4_fix(blob, pcie);
+
+ pcie->stream_id_cur = 0;
+ pcie->next_lut_index = 0;
+}
+
+/* Fixup Kernel DT for PCIe */
+void ft_pci_setup_ls_gen4(void *blob, struct bd_info *bd)
+{
+ struct ls_pcie_g4 *pcie;
+
+ list_for_each_entry(pcie, &ls_pcie_g4_list, list)
+ ft_pcie_layerscape_gen4_setup(blob, pcie);
+
+#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2)
+ fdt_fixup_pcie_ls_gen4(blob);
+#endif
+}
+
+#else /* !CONFIG_OF_BOARD_SETUP */
+void ft_pci_setup_ls_gen4(void *blob, struct bd_info *bd)
+{
+}
+#endif
diff --git a/drivers/pci/pcie_layerscape_rc.c b/drivers/pci/pcie_layerscape_rc.c
new file mode 100644
index 00000000000..e7913d43a8b
--- /dev/null
+++ b/drivers/pci/pcie_layerscape_rc.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020,2021 NXP
+ * Layerscape PCIe driver
+ */
+
+#include <asm/arch/fsl_serdes.h>
+#include <pci.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <errno.h>
+#include <malloc.h>
+#include <dm.h>
+#include <dm/devres.h>
+#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
+ defined(CONFIG_ARM)
+#include <asm/arch/clock.h>
+#endif
+#include "pcie_layerscape.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct ls_pcie_drvdata {
+ u32 lut_offset;
+ u32 ctrl_offset;
+ bool big_endian;
+};
+
+static void ls_pcie_cfg0_set_busdev(struct ls_pcie_rc *pcie_rc, u32 busdev)
+{
+ struct ls_pcie *pcie = pcie_rc->pcie;
+
+ dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
+}
+
+static void ls_pcie_cfg1_set_busdev(struct ls_pcie_rc *pcie_rc, u32 busdev)
+{
+ struct ls_pcie *pcie = pcie_rc->pcie;
+
+ dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
+}
+
+static void ls_pcie_setup_atu(struct ls_pcie_rc *pcie_rc)
+{
+ struct pci_region *io, *mem, *pref;
+ unsigned long long offset = 0;
+ struct ls_pcie *pcie = pcie_rc->pcie;
+ int idx = 0;
+ uint svr;
+
+ svr = get_svr();
+ if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
+ offset = LS1021_PCIE_SPACE_OFFSET +
+ LS1021_PCIE_SPACE_SIZE * pcie->idx;
+ }
+
+ /* ATU 0 : OUTBOUND : CFG0 */
+ ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_TYPE_CFG0,
+ pcie_rc->cfg_res.start + offset,
+ 0,
+
fdt_resource_size(&pcie_rc->cfg_res) / 2); + /* ATU 1 : OUTBOUND : CFG1 */ + ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_CFG1, + pcie_rc->cfg_res.start + offset + + fdt_resource_size(&pcie_rc->cfg_res) / 2, + 0, + fdt_resource_size(&pcie_rc->cfg_res) / 2); + + pci_get_regions(pcie_rc->bus, &io, &mem, &pref); + idx = PCIE_ATU_REGION_INDEX1 + 1; + + /* Fix the pcie memory map for LS2088A series SoCs */ + svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; + if (svr == SVR_LS2088A || svr == SVR_LS2084A || + svr == SVR_LS2048A || svr == SVR_LS2044A || + svr == SVR_LS2081A || svr == SVR_LS2041A) { + if (io) + io->phys_start = (io->phys_start & + (PCIE_PHYS_SIZE - 1)) + + LS2088A_PCIE1_PHYS_ADDR + + LS2088A_PCIE_PHYS_SIZE * pcie->idx; + if (mem) + mem->phys_start = (mem->phys_start & + (PCIE_PHYS_SIZE - 1)) + + LS2088A_PCIE1_PHYS_ADDR + + LS2088A_PCIE_PHYS_SIZE * pcie->idx; + if (pref) + pref->phys_start = (pref->phys_start & + (PCIE_PHYS_SIZE - 1)) + + LS2088A_PCIE1_PHYS_ADDR + + LS2088A_PCIE_PHYS_SIZE * pcie->idx; + } + + if (io) + /* ATU : OUTBOUND : IO */ + ls_pcie_atu_outbound_set(pcie, idx++, + PCIE_ATU_TYPE_IO, + io->phys_start + offset, + io->bus_start, + io->size); + + if (mem) + /* ATU : OUTBOUND : MEM */ + ls_pcie_atu_outbound_set(pcie, idx++, + PCIE_ATU_TYPE_MEM, + mem->phys_start + offset, + mem->bus_start, + mem->size); + + if (pref) + /* ATU : OUTBOUND : pref */ + ls_pcie_atu_outbound_set(pcie, idx++, + PCIE_ATU_TYPE_MEM, + pref->phys_start + offset, + pref->bus_start, + pref->size); + + ls_pcie_dump_atu(pcie, PCIE_ATU_REGION_NUM, PCIE_ATU_REGION_OUTBOUND); +} + +/* Return 0 if the address is valid, -errno if not valid */ +static int ls_pcie_addr_valid(struct ls_pcie_rc *pcie_rc, pci_dev_t bdf) +{ + struct udevice *bus = pcie_rc->bus; + struct ls_pcie *pcie = pcie_rc->pcie; + + if (pcie->mode == PCI_HEADER_TYPE_NORMAL) + return -ENODEV; + + if (!pcie_rc->enabled) + return -ENXIO; + + if (PCI_BUS(bdf) < dev_seq(bus)) + return -EINVAL; + + if ((PCI_BUS(bdf) > dev_seq(bus)) && (!ls_pcie_link_up(pcie))) + return -EINVAL; + + if (PCI_BUS(bdf) <= (dev_seq(bus) + 1) && (PCI_DEV(bdf) > 0)) + return -EINVAL; + + return 0; +} + +static int ls_pcie_conf_address(const struct udevice *bus, pci_dev_t bdf, + uint offset, void **paddress) +{ + struct ls_pcie_rc *pcie_rc = dev_get_priv(bus); + struct ls_pcie *pcie = pcie_rc->pcie; + u32 busdev; + + if (ls_pcie_addr_valid(pcie_rc, bdf)) + return -EINVAL; + + if (PCI_BUS(bdf) == dev_seq(bus)) { + *paddress = pcie->dbi + offset; + return 0; + } + + busdev = PCIE_ATU_BUS(PCI_BUS(bdf) - dev_seq(bus)) | + PCIE_ATU_DEV(PCI_DEV(bdf)) | + PCIE_ATU_FUNC(PCI_FUNC(bdf)); + + if (PCI_BUS(bdf) == dev_seq(bus) + 1) { + ls_pcie_cfg0_set_busdev(pcie_rc, busdev); + *paddress = pcie_rc->cfg0 + offset; + } else { + ls_pcie_cfg1_set_busdev(pcie_rc, busdev); + *paddress = pcie_rc->cfg1 + offset; + } + return 0; +} + +static int ls_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_generic_mmap_read_config(bus, ls_pcie_conf_address, + bdf, offset, valuep, size); +} + +static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + return pci_generic_mmap_write_config(bus, ls_pcie_conf_address, + bdf, offset, value, size); +} + +/* Clear multi-function bit */ +static void ls_pcie_clear_multifunction(struct ls_pcie_rc *pcie_rc) +{ + struct ls_pcie *pcie = pcie_rc->pcie; + + writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + 
PCI_HEADER_TYPE); +} + +/* Fix class value */ +static void ls_pcie_fix_class(struct ls_pcie_rc *pcie_rc) +{ + struct ls_pcie *pcie = pcie_rc->pcie; + + writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); +} + +/* Drop MSG TLP except for Vendor MSG */ +static void ls_pcie_drop_msg_tlp(struct ls_pcie_rc *pcie_rc) +{ + struct ls_pcie *pcie = pcie_rc->pcie; + u32 val; + + val = dbi_readl(pcie, PCIE_STRFMR1); + val &= 0xDFFFFFFF; + dbi_writel(pcie, val, PCIE_STRFMR1); +} + +/* Disable all bars in RC mode */ +static void ls_pcie_disable_bars(struct ls_pcie_rc *pcie_rc) +{ + struct ls_pcie *pcie = pcie_rc->pcie; + + dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0); + dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1); + dbi_writel(pcie, 0xfffffffe, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1); +} + +static void ls_pcie_setup_ctrl(struct ls_pcie_rc *pcie_rc) +{ + struct ls_pcie *pcie = pcie_rc->pcie; + + ls_pcie_setup_atu(pcie_rc); + + ls_pcie_dbi_ro_wr_en(pcie); + ls_pcie_fix_class(pcie_rc); + ls_pcie_clear_multifunction(pcie_rc); + ls_pcie_drop_msg_tlp(pcie_rc); + ls_pcie_dbi_ro_wr_dis(pcie); + + ls_pcie_disable_bars(pcie_rc); +} + +static int ls_pcie_probe(struct udevice *dev) +{ + const struct ls_pcie_drvdata *drvdata = (void *)dev_get_driver_data(dev); + struct ls_pcie_rc *pcie_rc = dev_get_priv(dev); + const void *fdt = gd->fdt_blob; + int node = dev_of_offset(dev); + struct ls_pcie *pcie; + u16 link_sta; + uint svr; + int ret; + fdt_size_t cfg_size; + + pcie_rc->bus = dev; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie_rc->pcie = pcie; + + /* try resource name of the official binding first */ + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "regs", &pcie_rc->dbi_res); + if (ret) + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "dbi", &pcie_rc->dbi_res); + if (ret) { + printf("ls-pcie: resource \"dbi\" not found\n"); + return ret; + } + + pcie->idx = (pcie_rc->dbi_res.start - PCIE_SYS_BASE_ADDR) / + PCIE_CCSR_SIZE; + + list_add(&pcie_rc->list, &ls_pcie_list); + + pcie_rc->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx)); + if (!pcie_rc->enabled) { + printf("PCIe%d: %s disabled\n", PCIE_SRDS_PRTCL(pcie->idx), + dev->name); + return 0; + } + + pcie->dbi = map_physmem(pcie_rc->dbi_res.start, + fdt_resource_size(&pcie_rc->dbi_res), + MAP_NOCACHE); + + pcie->mode = readb(pcie->dbi + PCI_HEADER_TYPE) & 0x7f; + if (pcie->mode == PCI_HEADER_TYPE_NORMAL) + return 0; + + if (drvdata) { + pcie->lut = pcie->dbi + drvdata->lut_offset; + } else { + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "lut", &pcie_rc->lut_res); + if (!ret) + pcie->lut = map_physmem(pcie_rc->lut_res.start, + fdt_resource_size(&pcie_rc->lut_res), + MAP_NOCACHE); + } + + if (drvdata) { + pcie->ctrl = pcie->lut + drvdata->ctrl_offset; + } else { + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "ctrl", &pcie_rc->ctrl_res); + if (!ret) + pcie->ctrl = map_physmem(pcie_rc->ctrl_res.start, + fdt_resource_size(&pcie_rc->ctrl_res), + MAP_NOCACHE); + if (!pcie->ctrl) + pcie->ctrl = pcie->lut; + } + + if (!pcie->ctrl) { + printf("%s: NOT find CTRL\n", dev->name); + return -1; + } + + ret = fdt_get_named_resource(fdt, node, "reg", "reg-names", + "config", &pcie_rc->cfg_res); + if (ret) { + printf("%s: resource \"config\" not found\n", dev->name); + return ret; + } + + cfg_size = fdt_resource_size(&pcie_rc->cfg_res); + if (cfg_size < SZ_8K) { + printf("PCIe%d: %s Invalid size(0x%llx) for resource 
\"config\",expected minimum 0x%x\n", + PCIE_SRDS_PRTCL(pcie->idx), dev->name, (u64)cfg_size, SZ_8K); + return 0; + } + + /* + * Fix the pcie memory map address and PF control registers address + * for LS2088A series SoCs + */ + svr = get_svr(); + svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; + if (svr == SVR_LS2088A || svr == SVR_LS2084A || + svr == SVR_LS2048A || svr == SVR_LS2044A || + svr == SVR_LS2081A || svr == SVR_LS2041A) { + pcie_rc->cfg_res.start = LS2088A_PCIE1_PHYS_ADDR + + LS2088A_PCIE_PHYS_SIZE * pcie->idx; + pcie_rc->cfg_res.end = pcie_rc->cfg_res.start + cfg_size; + pcie->ctrl = pcie->lut + 0x40000; + } + + pcie_rc->cfg0 = map_physmem(pcie_rc->cfg_res.start, + fdt_resource_size(&pcie_rc->cfg_res), + MAP_NOCACHE); + pcie_rc->cfg1 = pcie_rc->cfg0 + + fdt_resource_size(&pcie_rc->cfg_res) / 2; + + if (drvdata) + pcie->big_endian = drvdata->big_endian; + else + pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian"); + + debug("%s dbi:%lx lut:%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n", + dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut, + (unsigned long)pcie->ctrl, (unsigned long)pcie_rc->cfg0, + pcie->big_endian); + + printf("PCIe%u: %s %s", PCIE_SRDS_PRTCL(pcie->idx), dev->name, + "Root Complex"); + ls_pcie_setup_ctrl(pcie_rc); + + if (!ls_pcie_link_up(pcie)) { + /* Let the user know there's no PCIe link */ + printf(": no link\n"); + return 0; + } + + /* Print the negotiated PCIe link width */ + link_sta = readw(pcie->dbi + PCIE_LINK_STA); + printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4, + link_sta & PCIE_LINK_SPEED_MASK); + + return 0; +} + +static const struct dm_pci_ops ls_pcie_ops = { + .read_config = ls_pcie_read_config, + .write_config = ls_pcie_write_config, +}; + +static const struct ls_pcie_drvdata ls1028a_drvdata = { + .lut_offset = 0x80000, + .ctrl_offset = 0x40000, + .big_endian = false, +}; + +static const struct udevice_id ls_pcie_ids[] = { + { .compatible = "fsl,ls-pcie" }, + { .compatible = "fsl,ls1028a-pcie", .data = (ulong)&ls1028a_drvdata }, + { .compatible = "fsl,ls1088a-pcie", .data = (ulong)&ls1028a_drvdata }, + { } +}; + +U_BOOT_DRIVER(pci_layerscape) = { + .name = "pci_layerscape", + .id = UCLASS_PCI, + .of_match = ls_pcie_ids, + .ops = &ls_pcie_ops, + .probe = ls_pcie_probe, + .priv_auto = sizeof(struct ls_pcie_rc), +}; diff --git a/drivers/pci/pcie_mediatek.c b/drivers/pci/pcie_mediatek.c new file mode 100644 index 00000000000..04d8cc29afd --- /dev/null +++ b/drivers/pci/pcie_mediatek.c @@ -0,0 +1,745 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek PCIe host controller driver. + * + * Copyright (c) 2017-2019 MediaTek Inc. 
+ * Author: Ryder Lee <ryder.lee@mediatek.com> + * Honghui Zhang <honghui.zhang@mediatek.com> + */ + +#include <clk.h> +#include <dm.h> +#include <generic-phy.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <reset.h> +#include <asm/io.h> +#include <dm/devres.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include <linux/list.h> +#include <linux/printk.h> +#include "pci_internal.h" + +/* PCIe shared registers */ +#define PCIE_SYS_CFG 0x00 +#define PCIE_INT_ENABLE 0x0c +#define PCIE_CFG_ADDR 0x20 +#define PCIE_CFG_DATA 0x24 + +/* PCIe per port registers */ +#define PCIE_BAR0_SETUP 0x10 +#define PCIE_CLASS 0x34 +#define PCIE_LINK_STATUS 0x50 + +#define PCIE_PORT_INT_EN(x) BIT(20 + (x)) +#define PCIE_PORT_PERST(x) BIT(1 + (x)) +#define PCIE_PORT_LINKUP BIT(0) +#define PCIE_BAR_MAP_MAX GENMASK(31, 16) + +#define PCIE_BAR_ENABLE BIT(0) +#define PCIE_REVISION_ID BIT(0) +#define PCIE_CLASS_CODE (0x60400 << 8) + +/* MediaTek specific configuration registers */ +#define PCIE_FTS_NUM 0x70c +#define PCIE_FTS_NUM_MASK GENMASK(15, 8) +#define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8) + +#define PCIE_FC_CREDIT 0x73c +#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) +#define PCIE_FC_CREDIT_VAL(x) ((x) << 16) + +/* PCIe V2 share registers */ +#define PCIE_SYS_CFG_V2 0x0 +#define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8) +#define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8) + +/* PCIe V2 per-port registers */ +#define PCIE_CONF_VEND_ID 0x100 +#define PCIE_CONF_DEVICE_ID 0x102 +#define PCIE_CONF_CLASS_ID 0x106 + +#define PCIE_AHB_TRANS_BASE0_L 0x438 +#define PCIE_AHB_TRANS_BASE0_H 0x43c +#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) +#define PCIE_AXI_WINDOW0 0x448 +#define WIN_ENABLE BIT(7) + +/* + * Define PCIe to AHB window size as 2^33 to support max 8GB address space + * translate, support least 4GB DRAM size access from EP DMA(physical DRAM + * start from 0x40000000). 
+ */ +#define PCIE2AHB_SIZE 0x21 + +/* PCIe V2 configuration transaction header */ +#define PCIE_CFG_HEADER0 0x460 +#define PCIE_CFG_HEADER1 0x464 +#define PCIE_CFG_HEADER2 0x468 +#define PCIE_CFG_WDATA 0x470 +#define PCIE_APP_TLP_REQ 0x488 +#define PCIE_CFG_RDATA 0x48c +#define APP_CFG_REQ BIT(0) +#define APP_CPL_STATUS GENMASK(7, 5) + +#define CFG_WRRD_TYPE_0 4 +#define CFG_WR_FMT 2 +#define CFG_RD_FMT 0 + +#define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0)) +#define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24)) +#define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29)) +#define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2)) +#define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16)) +#define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19)) +#define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24)) +#define CFG_HEADER_DW0(type, fmt) \ + (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt)) +#define CFG_HEADER_DW1(where, size) \ + (GENMASK(((size) - 1), 0) << ((where) & 0x3)) +#define CFG_HEADER_DW2(regn, fun, dev, bus) \ + (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \ + CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus)) + +#define PCIE_RST_CTRL 0x510 +#define PCIE_PHY_RSTB BIT(0) +#define PCIE_PIPE_SRSTB BIT(1) +#define PCIE_MAC_SRSTB BIT(2) +#define PCIE_CRSTB BIT(3) +#define PCIE_PERSTB BIT(8) +#define PCIE_LINKDOWN_RST_EN GENMASK(15, 13) +#define PCIE_LINK_STATUS_V2 0x804 +#define PCIE_PORT_LINKUP_V2 BIT(11) + +#define PCI_VENDOR_ID_MEDIATEK 0x14c3 + +enum MTK_PCIE_GEN {PCIE_V1, PCIE_V2, PCIE_V3}; + +struct mtk_pcie_port { + void __iomem *base; + struct list_head list; + struct mtk_pcie *pcie; + struct reset_ctl reset; + struct clk sys_ck; + struct clk ahb_ck; + struct clk axi_ck; + struct clk aux_ck; + struct clk obff_ck; + struct clk pipe_ck; + struct phy phy; + u32 slot; +}; + +struct mtk_pcie { + void __iomem *base; + void *priv; + struct clk free_ck; + struct list_head ports; +}; + +static int mtk_pcie_config_address(const struct udevice *udev, pci_dev_t bdf, + uint offset, void **paddress) +{ + struct mtk_pcie *pcie = dev_get_priv(udev); + u32 val; + + val = PCI_CONF1_EXT_ADDRESS(PCI_BUS(bdf), PCI_DEV(bdf), + PCI_FUNC(bdf), offset) & ~PCI_CONF1_ENABLE; + writel(val, pcie->base + PCIE_CFG_ADDR); + *paddress = pcie->base + PCIE_CFG_DATA + (offset & 3); + + return 0; +} + +static int mtk_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_generic_mmap_read_config(bus, mtk_pcie_config_address, + bdf, offset, valuep, size); +} + +static int mtk_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + return pci_generic_mmap_write_config(bus, mtk_pcie_config_address, + bdf, offset, value, size); +} + +static const struct dm_pci_ops mtk_pcie_ops = { + .read_config = mtk_pcie_read_config, + .write_config = mtk_pcie_write_config, +}; + +static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) +{ + u32 val; + int err; + + err = readl_poll_timeout(port->base + PCIE_APP_TLP_REQ, val, + !(val & APP_CFG_REQ), 100 * 1000); + if (err) + return -1; + + if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) + return -1; + + return 0; +} + +static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, pci_dev_t devfn, + int where, int size, ulong *val) +{ + u32 tmp; + + writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT), + port->base + PCIE_CFG_HEADER0); + writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); + 
writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_DEV(devfn), bus), + port->base + PCIE_CFG_HEADER2); + + /* Trigger h/w to transmit Cfgrd TLP */ + tmp = readl(port->base + PCIE_APP_TLP_REQ); + tmp |= APP_CFG_REQ; + writel(tmp, port->base + PCIE_APP_TLP_REQ); + + /* Check completion status */ + if (mtk_pcie_check_cfg_cpld(port)) + return -1; + + /* Read cpld payload of Cfgrd */ + *val = readl(port->base + PCIE_CFG_RDATA); + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + return 0; +} + +static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, pci_dev_t devfn, + int where, int size, u32 val) +{ + /* Write PCIe configuration transaction header for Cfgwr */ + writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT), + port->base + PCIE_CFG_HEADER0); + writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); + writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_DEV(devfn), bus), + port->base + PCIE_CFG_HEADER2); + + /* Write Cfgwr data */ + val = val << 8 * (where & 3); + writel(val, port->base + PCIE_CFG_WDATA); + + /* Trigger h/w to transmit Cfgwr TLP */ + val = readl(port->base + PCIE_APP_TLP_REQ); + val |= APP_CFG_REQ; + writel(val, port->base + PCIE_APP_TLP_REQ); + + /* Check completion status */ + return mtk_pcie_check_cfg_cpld(port); +} + +static struct mtk_pcie_port *mtk_pcie_find_port(const struct udevice *bus, + pci_dev_t bdf) +{ + struct mtk_pcie *pcie = dev_get_priv(bus); + struct mtk_pcie_port *port; + struct udevice *dev; + struct pci_child_plat *pplat = NULL; + int ret = 0; + + if (PCI_BUS(bdf) != 0) { + ret = pci_get_bus(PCI_BUS(bdf), &dev); + if (ret) { + debug("No such device,ret = %d\n", ret); + return NULL; + } + + while (dev_seq(dev->parent) != 0) + dev = dev->parent; + + pplat = dev_get_parent_plat(dev); + } + + list_for_each_entry(port, &pcie->ports, list) { + if ((PCI_BUS(bdf) == 0) && (PCI_DEV(bdf) == port->slot)) + return port; + + if (PCI_BUS(bdf) != 0 && PCI_DEV(bdf) == 0 && + PCI_DEV(pplat->devfn) == port->slot) + return port; + } + + return NULL; +} + +static int mtk_pcie_config_read(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct mtk_pcie_port *port; + int ret; + + port = mtk_pcie_find_port(bus, bdf); + if (!port) { + *valuep = pci_get_ff(size); + return 0; + } + + ret = mtk_pcie_hw_rd_cfg(port, PCI_BUS(bdf), bdf, offset, (1 << size), valuep); + if (ret) + *valuep = pci_get_ff(size); + + return ret; +} + +static int mtk_pcie_config_write(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct mtk_pcie_port *port; + + port = mtk_pcie_find_port(bus, bdf); + if (!port) + return 0; + + /* Do not modify RC bar 0/1. 
*/ + if (PCI_BUS(bdf) == 0 && (offset == 0x10 || offset == 0x14)) + return 0; + + return mtk_pcie_hw_wr_cfg(port, PCI_BUS(bdf), bdf, offset, (1 << size), value); +} + +static const struct dm_pci_ops mtk_pcie_ops_v2 = { + .read_config = mtk_pcie_config_read, + .write_config = mtk_pcie_config_write, +}; + +static void mtk_pcie_port_free(struct mtk_pcie_port *port) +{ + list_del(&port->list); + free(port); +} + +static int mtk_pcie_startup_port(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + u32 val; + int err; + + /* assert port PERST_N */ + setbits_le32(pcie->base + PCIE_SYS_CFG, PCIE_PORT_PERST(port->slot)); + /* de-assert port PERST_N */ + clrbits_le32(pcie->base + PCIE_SYS_CFG, PCIE_PORT_PERST(port->slot)); + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val, + !!(val & PCIE_PORT_LINKUP), 100000); + if (err) + return -ETIMEDOUT; + + /* disable interrupt */ + clrbits_le32(pcie->base + PCIE_INT_ENABLE, + PCIE_PORT_INT_EN(port->slot)); + + /* map to all DDR region. We need to set it before cfg operation. */ + writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, + port->base + PCIE_BAR0_SETUP); + + /* configure class code and revision ID */ + writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); + + /* configure FC credit */ + val = PCI_CONF1_EXT_ADDRESS(0, port->slot, 0, PCIE_FC_CREDIT) & ~PCI_CONF1_ENABLE; + writel(val, pcie->base + PCIE_CFG_ADDR); + clrsetbits_le32(pcie->base + PCIE_CFG_DATA, PCIE_FC_CREDIT_MASK, + PCIE_FC_CREDIT_VAL(0x806c)); + + /* configure RC FTS number to 250 when it leaves L0s */ + val = PCI_CONF1_EXT_ADDRESS(0, port->slot, 0, PCIE_FTS_NUM) & ~PCI_CONF1_ENABLE; + writel(val, pcie->base + PCIE_CFG_ADDR); + clrsetbits_le32(pcie->base + PCIE_CFG_DATA, PCIE_FTS_NUM_MASK, + PCIE_FTS_NUM_L0(0x50)); + + return 0; +} + +static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct udevice *dev = pcie->priv; + struct pci_region *pci_mem; + u32 val; + int err; + + /* MT7622/MT7629 platforms need to enable LTSSM and ASPM from PCIe subsys */ + if (pcie->base) { + val = readl(pcie->base + PCIE_SYS_CFG_V2); + val |= PCIE_CSR_LTSSM_EN(port->slot) | + PCIE_CSR_ASPM_L1_EN(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG_V2); + } + + /* Assert all reset signals */ + writel(0, port->base + PCIE_RST_CTRL); + + /* + * Enable PCIe link down reset, if link status changed from link up to + * link down, this will reset MAC control registers and configuration + * space. 
+ */ + writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); + udelay(500); + + /* De-assert PHY, PE, PIPE, MAC and configuration reset */ + val = readl(port->base + PCIE_RST_CTRL); + val |= PCIE_PHY_RSTB | PCIE_PIPE_SRSTB | PCIE_MAC_SRSTB | PCIE_CRSTB; + writel(val, port->base + PCIE_RST_CTRL); + + mdelay(100); + val |= PCIE_PERSTB; + writel(val, port->base + PCIE_RST_CTRL); + + /* Set up vendor ID and class code */ + val = PCI_VENDOR_ID_MEDIATEK; + writew(val, port->base + PCIE_CONF_VEND_ID); + + val = PCI_CLASS_BRIDGE_PCI; + writew(val, port->base + PCIE_CONF_CLASS_ID); + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, + !!(val & PCIE_PORT_LINKUP_V2), + 100 * 1000); + if (err) + return -ETIMEDOUT; + + pci_get_regions(dev, NULL, &pci_mem, NULL); + + /* Set AHB to PCIe translation windows */ + val = lower_32_bits(pci_mem->bus_start) | + AHB2PCIE_SIZE(fls(pci_mem->size) - 1); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); + + val = upper_32_bits(pci_mem->bus_start); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); + + /* Set PCIe to AXI translation memory space.*/ + val = PCIE2AHB_SIZE | WIN_ENABLE; + writel(val, port->base + PCIE_AXI_WINDOW0); + + return 0; +} + +static void mtk_pcie_enable_port(struct mtk_pcie_port *port) +{ + int err; + + err = clk_enable(&port->sys_ck); + if (err) + goto err_sys_clk; + + err = reset_assert(&port->reset); + if (err) + goto err_reset; + + err = reset_deassert(&port->reset); + if (err) + goto err_reset; + + err = generic_phy_init(&port->phy); + if (err) + goto err_phy_init; + + err = generic_phy_power_on(&port->phy); + if (err) + goto err_phy_on; + + if (!mtk_pcie_startup_port(port)) + return; + + pr_err("Port%d link down\n", port->slot); + + generic_phy_power_off(&port->phy); +err_phy_on: + generic_phy_exit(&port->phy); +err_phy_init: +err_reset: + clk_disable(&port->sys_ck); +err_sys_clk: + mtk_pcie_port_free(port); +} + +static void mtk_pcie_enable_port_v2(struct mtk_pcie_port *port) +{ + int err = 0; + + err = clk_enable(&port->sys_ck); + if (err) { + debug("clk_enable(sys_ck) failed: %d\n", err); + goto exit; + } + + err = clk_enable(&port->ahb_ck); + if (err) { + debug("clk_enable(ahb_ck) failed: %d\n", err); + goto exit; + } + + err = clk_enable(&port->aux_ck); + if (err) { + debug("clk_enable(aux_ck) failed: %d\n", err); + goto exit; + } + + err = clk_enable(&port->axi_ck); + if (err) { + debug("clk_enable(axi_ck) failed: %d\n", err); + goto exit; + } + + err = clk_enable(&port->obff_ck); + if (err) { + debug("clk_enable(obff_ck) failed: %d\n", err); + goto exit; + } + + err = clk_enable(&port->pipe_ck); + if (err) { + debug("clk_enable(pipe_ck) failed: %d\n", err); + goto exit; + } + + err = mtk_pcie_startup_port_v2(port); + if (!err) + return; + + pr_err("Port%d link down\n", port->slot); +exit: + mtk_pcie_port_free(port); +} + +static int mtk_pcie_parse_port(struct udevice *dev, u32 slot) +{ + struct mtk_pcie *pcie = dev_get_priv(dev); + struct mtk_pcie_port *port; + char name[10]; + int err; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + snprintf(name, sizeof(name), "port%d", slot); + port->base = dev_remap_addr_name(dev, name); + if (!port->base) + return -ENOENT; + + snprintf(name, sizeof(name), "sys_ck%d", slot); + err = clk_get_by_name(dev, name, &port->sys_ck); + if (err) + return err; + + err = reset_get_by_index(dev, slot, &port->reset); + if (err) + return err; + + err = 
generic_phy_get_by_index(dev, slot, &port->phy); + if (err) + return err; + + port->slot = slot; + port->pcie = pcie; + + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &pcie->ports); + + return 0; +} + +static int mtk_pcie_parse_port_v2(struct udevice *dev, u32 slot) +{ + struct mtk_pcie *pcie = dev_get_priv(dev); + struct mtk_pcie_port *port; + char name[10]; + int err; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + snprintf(name, sizeof(name), "port%d", slot); + port->base = dev_remap_addr_name(dev, name); + if (!port->base) { + debug("failed to map port%d base\n", slot); + return -ENOENT; + } + + snprintf(name, sizeof(name), "sys_ck%d", slot); + err = clk_get_by_name(dev, name, &port->sys_ck); + if (err) { + debug("clk_get_by_name(sys_ck) failed: %d\n", err); + return err; + } + + snprintf(name, sizeof(name), "ahb_ck%d", slot); + err = clk_get_by_name(dev, name, &port->ahb_ck); + if (err) { + debug("clk_get_by_name(ahb_ck) failed: %d\n", err); + return err; + } + + snprintf(name, sizeof(name), "aux_ck%d", slot); + err = clk_get_by_name(dev, name, &port->aux_ck); + if (err) { + debug("clk_get_by_name(aux_ck) failed: %d\n", err); + return err; + } + + snprintf(name, sizeof(name), "axi_ck%d", slot); + err = clk_get_by_name(dev, name, &port->axi_ck); + if (err) { + debug("clk_get_by_name(axi_ck) failed: %d\n", err); + return err; + } + + snprintf(name, sizeof(name), "obff_ck%d", slot); + err = clk_get_by_name(dev, name, &port->obff_ck); + if (err) { + debug("clk_get_by_name(obff_ck) failed: %d\n", err); + return err; + } + + snprintf(name, sizeof(name), "pipe_ck%d", slot); + err = clk_get_by_name(dev, name, &port->pipe_ck); + if (err) { + debug("clk_get_by_name(pipe_ck) failed: %d\n", err); + return err; + } + + port->slot = slot; + port->pcie = pcie; + + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &pcie->ports); + + return 0; +} + +static int mtk_pcie_probe(struct udevice *dev) +{ + struct mtk_pcie *pcie = dev_get_priv(dev); + struct mtk_pcie_port *port, *tmp; + ofnode subnode; + int err; + + INIT_LIST_HEAD(&pcie->ports); + + pcie->base = dev_remap_addr_name(dev, "subsys"); + if (!pcie->base) + return -ENOENT; + + err = clk_get_by_name(dev, "free_ck", &pcie->free_ck); + if (err) + return err; + + /* enable top level clock */ + err = clk_enable(&pcie->free_ck); + if (err) + return err; + + dev_for_each_subnode(subnode, dev) { + struct fdt_pci_addr addr; + u32 slot = 0; + + if (!ofnode_is_enabled(subnode)) + continue; + + err = ofnode_read_pci_addr(subnode, 0, "reg", &addr, NULL); + if (err) + return err; + + slot = PCI_DEV(addr.phys_hi); + + err = mtk_pcie_parse_port(dev, slot); + if (err) + return err; + } + + /* enable each port, and then check link status */ + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + mtk_pcie_enable_port(port); + + return 0; +} + +static int mtk_pcie_probe_v2(struct udevice *dev) +{ + struct mtk_pcie *pcie = dev_get_priv(dev); + struct mtk_pcie_port *port, *tmp; + struct fdt_pci_addr addr; + ofnode subnode; + unsigned int slot; + int err; + + INIT_LIST_HEAD(&pcie->ports); + + pcie->base = dev_remap_addr_name(dev, "subsys"); + if (!pcie->base) + return -ENOENT; + + pcie->priv = dev; + + dev_for_each_subnode(subnode, dev) { + if (!ofnode_is_enabled(subnode)) + continue; + + err = ofnode_read_pci_addr(subnode, 0, "reg", &addr, NULL); + if (err) + return err; + + slot = PCI_DEV(addr.phys_hi); + err = mtk_pcie_parse_port_v2(dev, slot); + if (err) + return err; + } + + /* enable each port, 
and then check link status */ + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + mtk_pcie_enable_port_v2(port); + + return 0; +} + +static const struct udevice_id mtk_pcie_ids[] = { + { .compatible = "mediatek,mt7623-pcie", PCIE_V1}, + { } +}; + +U_BOOT_DRIVER(pcie_mediatek_v1) = { + .name = "pcie_mediatek_v1", + .id = UCLASS_PCI, + .of_match = mtk_pcie_ids, + .ops = &mtk_pcie_ops, + .probe = mtk_pcie_probe, + .priv_auto = sizeof(struct mtk_pcie), +}; + +static const struct udevice_id mtk_pcie_ids_v2[] = { + { .compatible = "mediatek,mt7622-pcie", PCIE_V2}, + { } +}; + +U_BOOT_DRIVER(pcie_mediatek_v2) = { + .name = "pcie_mediatek_v2", + .id = UCLASS_PCI, + .of_match = mtk_pcie_ids_v2, + .ops = &mtk_pcie_ops_v2, + .probe = mtk_pcie_probe_v2, + .priv_auto = sizeof(struct mtk_pcie), +}; diff --git a/drivers/pci/pcie_mediatek_gen3.c b/drivers/pci/pcie_mediatek_gen3.c new file mode 100644 index 00000000000..0149edae0bf --- /dev/null +++ b/drivers/pci/pcie_mediatek_gen3.c @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek PCIe host controller driver. + * + * Copyright (c) 2023 John Crispin <john@phrozen.org> + * Driver is based on u-boot gen1/2 and upstream linux gen3 code + */ + +#include <clk.h> +#include <dm.h> +#include <generic-phy.h> +#include <log.h> +#include <malloc.h> +#include <pci.h> +#include <reset.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <dm/devres.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include <linux/list.h> +#include "pci_internal.h" + +/* PCIe shared registers */ +#define PCIE_CFG_ADDR 0x20 +#define PCIE_CFG_DATA 0x24 + +#define PCIE_SETTING_REG 0x80 + +#define PCIE_PCI_IDS_1 0x9c +#define PCIE_RC_MODE BIT(0) +#define PCI_CLASS(class) ((class) << 8) + +#define PCIE_CFGNUM_REG 0x140 +#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0)) +#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8)) +#define PCIE_CFG_BYTE_EN(bytes) (((bytes) << 16) & GENMASK(19, 16)) +#define PCIE_CFG_FORCE_BYTE_EN BIT(20) +#define PCIE_CFG_OFFSET_ADDR 0x1000 +#define PCIE_CFG_HEADER(bus, devfn) (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn)) + +#define PCIE_RST_CTRL_REG 0x148 +#define PCIE_MAC_RSTB BIT(0) +#define PCIE_PHY_RSTB BIT(1) +#define PCIE_BRG_RSTB BIT(2) +#define PCIE_PE_RSTB BIT(3) + +#define PCIE_LINK_STATUS_REG 0x154 +#define PCIE_PORT_LINKUP BIT(8) + +#define PCIE_INT_ENABLE_REG 0x180 + +#define PCIE_MISC_CTRL_REG 0x348 +#define PCIE_DISABLE_DVFSRC_VLT_REQ BIT(1) + +#define PCIE_TRANS_TABLE_BASE_REG 0x800 +#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4 +#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8 +#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc +#define PCIE_ATR_TRSL_PARAM_OFFSET 0x10 +#define PCIE_ATR_TLB_SET_OFFSET 0x20 + +#define PCIE_MAX_TRANS_TABLES 8 +#define PCIE_ATR_EN BIT(0) +#define PCIE_ATR_SIZE(size) \ + (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN) +#define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0)) +#define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0) +#define PCIE_ATR_TYPE_IO PCIE_ATR_ID(1) +#define PCIE_ATR_TLP_TYPE(type) (((type) << 16) & GENMASK(18, 16)) +#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0) +#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2) + +struct mtk_pcie { + void __iomem *base; + void *priv; + struct clk pl_250m_ck; + struct clk tl_26m_ck; + struct clk peri_26m_ck; + struct clk top_133m_ck; + struct reset_ctl reset_phy; + struct reset_ctl reset_mac; + struct phy phy; +}; + +static void mtk_pcie_config_tlp_header(const struct udevice *bus, + pci_dev_t devfn, + int where, int size) +{ + struct 
mtk_pcie *pcie = dev_get_priv(bus); + int bytes; + u32 val; + + size = 1 << size; + bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3); + + val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) | + PCIE_CFG_HEADER(PCI_BUS(devfn), (devfn >> 8)); + + writel(val, pcie->base + PCIE_CFGNUM_REG); +} + +static int mtk_pcie_config_address(const struct udevice *udev, pci_dev_t bdf, + uint offset, void **paddress) +{ + struct mtk_pcie *pcie = dev_get_priv(udev); + + *paddress = pcie->base + PCIE_CFG_OFFSET_ADDR + offset; + + return 0; +} + +static int mtk_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + int ret; + + mtk_pcie_config_tlp_header(bus, bdf, offset, size); + ret = pci_generic_mmap_read_config(bus, mtk_pcie_config_address, + bdf, offset, valuep, size); + return ret; +} + +static int mtk_pcie_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + mtk_pcie_config_tlp_header(bus, bdf, offset, size); + + switch (size) { + case PCI_SIZE_8: + case PCI_SIZE_16: + value <<= (offset & 0x3) * 8; + case PCI_SIZE_32: + break; + default: + return -EINVAL; + } + + return pci_generic_mmap_write_config(bus, mtk_pcie_config_address, + bdf, (offset & ~0x3), value, PCI_SIZE_32); +} + +static const struct dm_pci_ops mtk_pcie_ops = { + .read_config = mtk_pcie_read_config, + .write_config = mtk_pcie_write_config, +}; + +static int mtk_pcie_set_trans_table(struct udevice *dev, struct mtk_pcie *pcie, + u64 cpu_addr, u64 pci_addr, u64 size, + unsigned long type, int num) +{ + void __iomem *table; + u32 val; + + if (num >= PCIE_MAX_TRANS_TABLES) { + dev_err(dev, "not enough translate table for addr: %#llx, limited to [%d]\n", + (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); + return -ENODEV; + } + + dev_dbg(dev, "set trans table %d: %#llx %#llx, %#llx\n", num, cpu_addr, + pci_addr, size); + table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + + num * PCIE_ATR_TLB_SET_OFFSET; + + writel(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), table); + writel(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET); + writel(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET); + writel(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET); + + if (type == PCI_REGION_IO) + val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO; + else + val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM; + writel(val, table + PCIE_ATR_TRSL_PARAM_OFFSET); + + return 0; +} + +static int mtk_pcie_startup_port(struct udevice *dev) +{ + struct mtk_pcie *pcie = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + u32 val; + int i, err; + + /* Set as RC mode */ + val = readl(pcie->base + PCIE_SETTING_REG); + val |= PCIE_RC_MODE; + writel(val, pcie->base + PCIE_SETTING_REG); + + /* setup RC BARs */ + writel(PCI_BASE_ADDRESS_MEM_TYPE_64, + pcie->base + PCI_BASE_ADDRESS_0); + writel(0x0, pcie->base + PCI_BASE_ADDRESS_1); + + /* setup interrupt pins */ + clrsetbits_le32(pcie->base + PCI_INTERRUPT_LINE, + 0xff00, 0x100); + + /* setup bus numbers */ + clrsetbits_le32(pcie->base + PCI_PRIMARY_BUS, + 0xffffff, 0x00ff0100); + + /* setup command register */ + clrsetbits_le32(pcie->base + PCI_PRIMARY_BUS, + 0xffff, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SERR); + + /* Set class code */ + val = readl(pcie->base + PCIE_PCI_IDS_1); + val &= ~GENMASK(31, 8); + val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8); + 
writel(val, pcie->base + PCIE_PCI_IDS_1); + + /* Mask all INTx interrupts */ + val = readl(pcie->base + PCIE_INT_ENABLE_REG); + val &= ~0xFF000000; + writel(val, pcie->base + PCIE_INT_ENABLE_REG); + + /* Disable DVFSRC voltage request */ + val = readl(pcie->base + PCIE_MISC_CTRL_REG); + val |= PCIE_DISABLE_DVFSRC_VLT_REQ; + writel(val, pcie->base + PCIE_MISC_CTRL_REG); + + /* Assert all reset signals */ + val = readl(pcie->base + PCIE_RST_CTRL_REG); + val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB; + writel(val, pcie->base + PCIE_RST_CTRL_REG); + + /* + * Described in PCIe CEM specification sections 2.2 (PERST# Signal) + * and 2.2.1 (Initial Power-Up (G3 to S0)). + * The deassertion of PERST# should be delayed 100ms (TPVPERL) + * for the power and clock to become stable. + */ + mdelay(100); + + /* De-assert reset signals */ + val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB); + writel(val, pcie->base + PCIE_RST_CTRL_REG); + + mdelay(100); + + /* De-assert PERST# signals */ + val &= ~(PCIE_PE_RSTB); + writel(val, pcie->base + PCIE_RST_CTRL_REG); + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val, + !!(val & PCIE_PORT_LINKUP), + 100 * 1000); + if (err) { + dev_dbg(dev, "no card detected\n"); + return -ETIMEDOUT; + } + dev_dbg(dev, "detected a card\n"); + + for (i = 0; i < hose->region_count; i++) { + struct pci_region *reg = &hose->regions[i]; + + if (reg->flags != PCI_REGION_MEM) + continue; + + mtk_pcie_set_trans_table(dev, pcie, reg->bus_start, reg->phys_start, + reg->size, reg->flags, 0); + } + + return 0; +} + +static int mtk_pcie_power_on(struct udevice *dev) +{ + struct mtk_pcie *pcie = dev_get_priv(dev); + int err; + + pcie->base = dev_remap_addr_name(dev, "pcie-mac"); + if (!pcie->base) + return -ENOENT; + + pcie->priv = dev; + + /* pcie-phy is optional (mt7988 doesn't need it) */ + generic_phy_get_by_name(dev, "pcie-phy", &pcie->phy); + + /* + * Upstream linux kernel devine these clock without clock-names + * and use clk bulk API to enable them all. 
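The comment above notes that upstream Linux defines these clocks without clock-names and enables them through the clk bulk API. The same approach is available in U-Boot and is what pcie_starfive_jh7110.c later in this series uses; a minimal sketch of it follows (editor's illustration, not part of the patch — the clk_bulk member is an assumption, the driver as posted keeps four named struct clk handles):

#include <clk.h>
#include <dm.h>

/* Hypothetical helper: fetch and enable every clock in the node's "clocks". */
static int mtk_pcie_enable_clks_bulk(struct udevice *dev,
				     struct clk_bulk *clks)
{
	int err;

	/* Grab all clocks listed for the device ... */
	err = clk_get_bulk(dev, clks);
	if (err)
		return err;

	/* ... and enable them in one call */
	return clk_enable_bulk(clks);
}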
+ */ + err = clk_get_by_index(dev, 0, &pcie->pl_250m_ck); + if (err) + return err; + + err = clk_get_by_index(dev, 1, &pcie->tl_26m_ck); + if (err) + return err; + + err = clk_get_by_index(dev, 2, &pcie->peri_26m_ck); + if (err) + return err; + + err = clk_get_by_index(dev, 3, &pcie->top_133m_ck); + if (err) + return err; + + if (pcie->phy.dev) { + err = generic_phy_init(&pcie->phy); + if (err) + return err; + + err = generic_phy_power_on(&pcie->phy); + if (err) + goto err_phy_on; + } + + err = clk_enable(&pcie->pl_250m_ck); + if (err) + goto err_clk_pl_250m; + + err = clk_enable(&pcie->tl_26m_ck); + if (err) + goto err_clk_tl_26m; + + err = clk_enable(&pcie->peri_26m_ck); + if (err) + goto err_clk_peri_26m; + + err = clk_enable(&pcie->top_133m_ck); + if (err) + goto err_clk_top_133m; + + err = mtk_pcie_startup_port(dev); + if (err) + goto err_startup; + + return 0; + +err_startup: +err_clk_top_133m: + clk_disable(&pcie->top_133m_ck); +err_clk_peri_26m: + clk_disable(&pcie->peri_26m_ck); +err_clk_tl_26m: + clk_disable(&pcie->tl_26m_ck); +err_clk_pl_250m: + clk_disable(&pcie->pl_250m_ck); +err_phy_on: + if (pcie->phy.dev) + generic_phy_exit(&pcie->phy); + + return err; +} + +static int mtk_pcie_probe(struct udevice *dev) +{ + struct mtk_pcie *pcie = dev_get_priv(dev); + int err; + + pcie->priv = dev; + + err = mtk_pcie_power_on(dev); + if (err) + return err; + + return 0; +} + +static const struct udevice_id mtk_pcie_ids[] = { + { .compatible = "mediatek,mt8192-pcie" }, + { } +}; + +U_BOOT_DRIVER(pcie_mediatek_gen3) = { + .name = "pcie_mediatek_gen3", + .id = UCLASS_PCI, + .of_match = mtk_pcie_ids, + .ops = &mtk_pcie_ops, + .probe = mtk_pcie_probe, + .priv_auto = sizeof(struct mtk_pcie), +}; diff --git a/drivers/pci/pcie_octeon.c b/drivers/pci/pcie_octeon.c new file mode 100644 index 00000000000..3b28bd81439 --- /dev/null +++ b/drivers/pci/pcie_octeon.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2020 Stefan Roese <sr@denx.de> + */ + +#include <dm.h> +#include <errno.h> +#include <fdtdec.h> +#include <log.h> +#include <pci.h> +#include <linux/delay.h> + +#include <mach/octeon-model.h> +#include <mach/octeon_pci.h> +#include <mach/cvmx-regs.h> +#include <mach/cvmx-pcie.h> +#include <mach/cvmx-pemx-defs.h> + +struct octeon_pcie { + void *base; + int first_busno; + u32 port; + struct udevice *dev; + int pcie_port; +}; + +static bool octeon_bdf_invalid(pci_dev_t bdf, int first_busno) +{ + /* + * In PCIe only a single device (0) can exist on the local bus. + * Beyound the local bus, there might be a switch and everything + * is possible. 
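The check that follows relies on U-Boot's packed BDF layout (bus << 16 | device << 11 | function << 8). A stand-alone illustration of the rule (editor's sketch, not part of the patch) — plain C with the macros re-declared locally so it compiles outside U-Boot, and bdf_invalid() as a hypothetical mirror of octeon_bdf_invalid():

#include <assert.h>
#include <stdbool.h>

/* Local copies of U-Boot's BDF packing, so this compiles stand-alone */
#define PCI_BDF(b, d, f)	(((b) << 16) | ((d) << 11) | ((f) << 8))
#define PCI_BUS(bdf)		(((bdf) >> 16) & 0xff)
#define PCI_DEV(bdf)		(((bdf) >> 11) & 0x1f)

/* Same rule as above: only device 0 can exist on the local (root) bus */
static bool bdf_invalid(int bdf, int first_busno)
{
	return PCI_BUS(bdf) == first_busno && PCI_DEV(bdf) > 0;
}

int main(void)
{
	assert(!bdf_invalid(PCI_BDF(0, 0, 0), 0)); /* the root port itself */
	assert(bdf_invalid(PCI_BDF(0, 1, 0), 0));  /* no device 1 on the local bus */
	assert(!bdf_invalid(PCI_BDF(1, 3, 0), 0)); /* behind a switch: anything goes */
	return 0;
}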
+ */ + if ((PCI_BUS(bdf) == first_busno) && (PCI_DEV(bdf) > 0)) + return true; + + return false; +} + +static int pcie_octeon_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct octeon_pcie *pcie = dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + int busno; + int port; + + debug("PCIE CFG write: (b,d,f)=(%2d,%2d,%2d) ", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + debug("(addr,size,val)=(0x%04x, %d, 0x%08lx)\n", offset, size, value); + + port = pcie->pcie_port; + busno = PCI_BUS(bdf) - hose->first_busno + 1; + + switch (size) { + case PCI_SIZE_8: + cvmx_pcie_config_write8(port, busno, PCI_DEV(bdf), + PCI_FUNC(bdf), offset, value); + break; + case PCI_SIZE_16: + cvmx_pcie_config_write16(port, busno, PCI_DEV(bdf), + PCI_FUNC(bdf), offset, value); + break; + case PCI_SIZE_32: + cvmx_pcie_config_write32(port, busno, PCI_DEV(bdf), + PCI_FUNC(bdf), offset, value); + break; + default: + printf("Invalid size\n"); + }; + + return 0; +} + +static int pcie_octeon_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct octeon_pcie *pcie = dev_get_priv(bus); + struct pci_controller *hose = dev_get_uclass_priv(bus); + int busno; + int port; + + port = pcie->pcie_port; + busno = PCI_BUS(bdf) - hose->first_busno + 1; + if (octeon_bdf_invalid(bdf, pcie->first_busno)) { + *valuep = pci_get_ff(size); + return 0; + } + + switch (size) { + case PCI_SIZE_8: + *valuep = cvmx_pcie_config_read8(port, busno, PCI_DEV(bdf), + PCI_FUNC(bdf), offset); + break; + case PCI_SIZE_16: + *valuep = cvmx_pcie_config_read16(port, busno, PCI_DEV(bdf), + PCI_FUNC(bdf), offset); + break; + case PCI_SIZE_32: + *valuep = cvmx_pcie_config_read32(port, busno, PCI_DEV(bdf), + PCI_FUNC(bdf), offset); + break; + default: + printf("Invalid size\n"); + }; + + debug("%02x.%02x.%02x: u%d %x -> %lx\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, *valuep); + + return 0; +} + +static int pcie_octeon_probe(struct udevice *dev) +{ + struct octeon_pcie *pcie = dev_get_priv(dev); + int node = cvmx_get_node_num(); + int pcie_port; + int ret = 0; + + /* Get port number, lane number and memory target / attr */ + if (ofnode_read_u32(dev_ofnode(dev), "marvell,pcie-port", + &pcie->port)) { + ret = -ENODEV; + goto err; + } + + pcie->first_busno = dev_seq(dev); + pcie_port = ((node << 4) | pcie->port); + ret = cvmx_pcie_rc_initialize(pcie_port); + if (ret != 0) + return ret; + + return 0; + +err: + return ret; +} + +static const struct dm_pci_ops pcie_octeon_ops = { + .read_config = pcie_octeon_read_config, + .write_config = pcie_octeon_write_config, +}; + +static const struct udevice_id pcie_octeon_ids[] = { + { .compatible = "marvell,pcie-host-octeon" }, + { } +}; + +U_BOOT_DRIVER(pcie_octeon) = { + .name = "pcie_octeon", + .id = UCLASS_PCI, + .of_match = pcie_octeon_ids, + .ops = &pcie_octeon_ops, + .probe = pcie_octeon_probe, + .priv_auto = sizeof(struct octeon_pcie), + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/pci/pcie_phytium.c b/drivers/pci/pcie_phytium.c new file mode 100644 index 00000000000..94de89bcad7 --- /dev/null +++ b/drivers/pci/pcie_phytium.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium PCIE host driver + * + * Heavily based on drivers/pci/pcie_xilinx.c + * + * Copyright (C) 2019 + */ + +#include <dm.h> +#include <pci.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <linux/printk.h> + +/** + * struct phytium_pcie - 
phytium PCIe controller state + * @cfg_base: The base address of memory mapped configuration space + */ +struct phytium_pcie { + void *cfg_base; +}; + +/* + * phytium_pci_skip_dev() + * @parent: Identifies the PCIe device to access + * + * Checks whether the parent of the PCIe device is bridge + * + * Return: true if it is bridge, else false. + */ +static int phytium_pci_skip_dev(pci_dev_t parent) +{ + unsigned char pos, id; + unsigned long addr = 0x40000000; + unsigned short capreg; + unsigned char port_type; + + addr += PCIE_ECAM_OFFSET(PCI_BUS(parent), PCI_DEV(parent), PCI_FUNC(parent), 0); + + pos = 0x34; + while (1) { + pos = readb(addr + pos); + if (pos < 0x40) + break; + pos &= ~3; + id = readb(addr + pos); + if (id == 0xff) + break; + if (id == 0x10) { + capreg = readw(addr + pos + 2); + port_type = (capreg >> 4) & 0xf; + if (port_type == 0x6 || port_type == 0x4) + return 1; + else + return 0; + } + pos += 1; + } + return 0; +} + +/** + * pci_phytium_conf_address() - Calculate the address of a config access + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @paddress: Pointer to the pointer to write the calculates address to + * + * Calculates the address that should be accessed to perform a PCIe + * configuration space access for a given device identified by the PCIe + * controller device @pcie and the bus, device & function numbers in @bdf. If + * access to the device is not valid then the function will return an error + * code. Otherwise the address to access will be written to the pointer pointed + * to by @paddress. + */ +static int pci_phytium_conf_address(const struct udevice *bus, pci_dev_t bdf, + uint offset, void **paddress) +{ + struct phytium_pcie *pcie = dev_get_priv(bus); + void *addr; + pci_dev_t bdf_parent; + + unsigned int bus_no = PCI_BUS(bdf); + unsigned int dev_no = PCI_DEV(bdf); + + bdf_parent = PCI_BDF((bus_no - 1), 0, 0); + + addr = pcie->cfg_base; + addr += PCIE_ECAM_OFFSET(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), 0); + + if (bus_no > 0 && dev_no > 0) { + if ((readb(addr + PCI_HEADER_TYPE) & 0x7f) != + PCI_HEADER_TYPE_BRIDGE) + return -ENODEV; + if (phytium_pci_skip_dev(bdf_parent)) + return -ENODEV; + } + + addr += offset; + *paddress = addr; + + return 0; +} + +/** + * pci_phytium_read_config() - Read from configuration space + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @valuep: A pointer at which to store the read value + * @size: Indicates the size of access to perform + * + * Read a value of size @size from offset @offset within the configuration + * space of the device identified by the bus, device & function numbers in @bdf + * on the PCI bus @bus. 
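pci_phytium_conf_address() above builds the mapped address with PCIE_ECAM_OFFSET(), i.e. the standard ECAM layout of 1 MiB per bus, 32 KiB per device and 4 KiB per function. A stand-alone illustration of that arithmetic (editor's sketch, not part of the patch; the macro is re-declared locally so the snippet compiles without U-Boot headers):

#include <stdint.h>
#include <stdio.h>

/* Standard ECAM layout: bus << 20 | dev << 15 | func << 12 | register */
#define PCIE_ECAM_OFFSET(b, d, f, off) \
	(((uint64_t)(b) << 20) | ((uint64_t)(d) << 15) | \
	 ((uint64_t)(f) << 12) | (off))

int main(void)
{
	/* Bus 1, device 0, function 0, register 0x10 (BAR0) */
	uint64_t off = PCIE_ECAM_OFFSET(1, 0, 0, 0x10);

	printf("config offset = 0x%llx\n", (unsigned long long)off);
	/* Prints 0x100010: 1 MiB for bus 1 plus the register offset */
	return 0;
}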
+ */ +static int pci_phytium_read_config(const struct udevice *bus, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_generic_mmap_read_config(bus, pci_phytium_conf_address, + bdf, offset, valuep, size); +} + +/** + * pci_phytium_write_config() - Write to configuration space + * @bus: Pointer to the PCI bus + * @bdf: Identifies the PCIe device to access + * @offset: The offset into the device's configuration space + * @value: The value to write + * @size: Indicates the size of access to perform + * + * Write the value @value of size @size from offset @offset within the + * configuration space of the device identified by the bus, device & function + * numbers in @bdf on the PCI bus @bus. + */ +static int pci_phytium_write_config(struct udevice *bus, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + return pci_generic_mmap_write_config(bus, pci_phytium_conf_address, + bdf, offset, value, size); +} + +/** + * pci_phytium_of_to_plat() - Translate from DT to device state + * @dev: A pointer to the device being operated on + * + * Translate relevant data from the device tree pertaining to device @dev into + * state that the driver will later make use of. This state is stored in the + * device's private data structure. + * + * Return: 0 on success, else -EINVAL + */ +static int pci_phytium_of_to_plat(struct udevice *dev) +{ + struct phytium_pcie *pcie = dev_get_priv(dev); + struct fdt_resource reg_res; + + DECLARE_GLOBAL_DATA_PTR; + + int err; + + err = fdt_get_resource(gd->fdt_blob, dev_of_offset(dev), "reg", + 0, ®_res); + if (err < 0) { + pr_err("\"reg\" resource not found\n"); + return err; + } + + pcie->cfg_base = map_physmem(reg_res.start, + fdt_resource_size(®_res), + MAP_NOCACHE); + + return 0; +} + +static const struct dm_pci_ops pci_phytium_ops = { + .read_config = pci_phytium_read_config, + .write_config = pci_phytium_write_config, +}; + +static const struct udevice_id pci_phytium_ids[] = { + { .compatible = "phytium,pcie-host-1.0" }, + { } +}; + +U_BOOT_DRIVER(pci_phytium) = { + .name = "pci_phytium", + .id = UCLASS_PCI, + .of_match = pci_phytium_ids, + .ops = &pci_phytium_ops, + .of_to_plat = pci_phytium_of_to_plat, + .priv_auto = sizeof(struct phytium_pcie), +}; diff --git a/drivers/pci/pcie_plda_common.c b/drivers/pci/pcie_plda_common.c new file mode 100644 index 00000000000..622a5cee109 --- /dev/null +++ b/drivers/pci/pcie_plda_common.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PLDA XpressRich PCIe host controller common functions. + * + * Copyright (C) 2023 StarFive Technology Co., Ltd. + * + */ + +#include <clk.h> +#include <dm.h> +#include <pci.h> +#include <pci_ids.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include "pcie_plda_common.h" + +static bool plda_pcie_addr_valid(struct pcie_plda *plda, pci_dev_t bdf) +{ + /* + * Single device limitation. + * PCIe controller contain HW issue that secondary bus of + * host bridge emumerate duplicate devices. + * Only can access device 0 in secondary bus. 
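The device-0 restriction that follows is applied against plda->sec_busno, which plda_pcie_config_write() further down learns by snooping configuration writes to the bridge's bus-number registers. A stand-alone illustration of that snooping rule (editor's sketch, not part of the patch), assuming the standard config-space offsets; the snoop_sec_busno() helper is hypothetical:

#include <assert.h>
#include <stdint.h>

#define PCI_PRIMARY_BUS		0x18	/* primary/secondary/subordinate dword */
#define PCI_SECONDARY_BUS	0x19

/*
 * Mirror of the rule in plda_pcie_config_write(): a write to
 * PCI_SECONDARY_BUS carries the bus number directly, a 16/32-bit write to
 * PCI_PRIMARY_BUS carries it in byte 1.  Returns -1 if the write is not a
 * bus-number update.
 */
static int snoop_sec_busno(unsigned int offset, int is_byte_write,
			   uint32_t value)
{
	if (offset == PCI_SECONDARY_BUS)
		return value & 0xff;
	if (offset == PCI_PRIMARY_BUS && !is_byte_write)
		return (value >> 8) & 0xff;
	return -1;
}

int main(void)
{
	/* 32-bit write: primary = 0, secondary = 1, subordinate = 0xff */
	assert(snoop_sec_busno(PCI_PRIMARY_BUS, 0, 0x00ff0100) == 1);
	/* 8-bit write straight to the secondary bus register */
	assert(snoop_sec_busno(PCI_SECONDARY_BUS, 1, 0x02) == 2);
	return 0;
}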
+ */ + if (PCI_BUS(bdf) == plda->sec_busno && PCI_DEV(bdf) > 0) + return false; + + return true; +} + +static int plda_pcie_conf_address(const struct udevice *udev, pci_dev_t bdf, + uint offset, void **paddr) +{ + struct pcie_plda *priv = dev_get_priv(udev); + int where = PCIE_ECAM_OFFSET(PCI_BUS(bdf) - dev_seq(udev), + PCI_DEV(bdf), PCI_FUNC(bdf), offset); + + if (!plda_pcie_addr_valid(priv, bdf)) + return -ENODEV; + + *paddr = (void *)(priv->cfg_base + where); + return 0; +} + +int plda_pcie_config_read(const struct udevice *udev, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + return pci_generic_mmap_read_config(udev, plda_pcie_conf_address, + bdf, offset, valuep, size); +} + +int plda_pcie_config_write(struct udevice *udev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct pcie_plda *priv = dev_get_priv(udev); + int ret; + + ret = pci_generic_mmap_write_config(udev, plda_pcie_conf_address, + bdf, offset, value, size); + + /* record secondary bus number */ + if (!ret && PCI_BUS(bdf) == dev_seq(udev) && + PCI_DEV(bdf) == 0 && PCI_FUNC(bdf) == 0 && + (offset == PCI_SECONDARY_BUS || + (offset == PCI_PRIMARY_BUS && size != PCI_SIZE_8))) { + priv->sec_busno = + ((offset == PCI_PRIMARY_BUS) ? (value >> 8) : value) & 0xff; + priv->sec_busno += dev_seq(udev); + debug("Secondary bus number was changed to %d\n", + priv->sec_busno); + } + return ret; +} + +int plda_pcie_set_atr_entry(struct pcie_plda *plda, phys_addr_t src_addr, + phys_addr_t trsl_addr, phys_size_t window_size, + int trsl_param) +{ + void __iomem *base = + plda->reg_base + XR3PCI_ATR_AXI4_SLV0; + + /* Support AXI4 Slave 0 Address Translation Tables 0-7. */ + if (plda->atr_table_num >= XR3PCI_ATR_MAX_TABLE_NUM) { + dev_err(plda->dev, "ATR table number %d exceeds max num\n", + plda->atr_table_num); + return -EINVAL; + } + base += XR3PCI_ATR_TABLE_OFFSET * plda->atr_table_num; + plda->atr_table_num++; + + /* + * X3PCI_ATR_SRC_ADDR_LOW: + * - bit 0: enable entry, + * - bits 1-6: ATR window size: total size in bytes: 2^(ATR_WSIZE + 1) + * - bits 7-11: reserved + * - bits 12-31: start of source address + */ + writel((lower_32_bits(src_addr) & XR3PCI_ATR_SRC_ADDR_MASK) | + (fls(window_size) - 1) << XR3PCI_ATR_SRC_WIN_SIZE_SHIFT | 1, + base + XR3PCI_ATR_SRC_ADDR_LOW); + writel(upper_32_bits(src_addr), base + XR3PCI_ATR_SRC_ADDR_HIGH); + writel((lower_32_bits(trsl_addr) & XR3PCI_ATR_TRSL_ADDR_MASK), + base + XR3PCI_ATR_TRSL_ADDR_LOW); + writel(upper_32_bits(trsl_addr), base + XR3PCI_ATR_TRSL_ADDR_HIGH); + writel(trsl_param, base + XR3PCI_ATR_TRSL_PARAM); + + dev_dbg(plda->dev, "ATR entry: 0x%010llx %s 0x%010llx [0x%010llx] (param: 0x%06x)\n", + src_addr, (trsl_param & XR3PCI_ATR_TRSL_DIR) ? "<-" : "->", + trsl_addr, (u64)window_size, trsl_param); + return 0; +} diff --git a/drivers/pci/pcie_plda_common.h b/drivers/pci/pcie_plda_common.h new file mode 100644 index 00000000000..409949f5342 --- /dev/null +++ b/drivers/pci/pcie_plda_common.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2023 StarFive Technology Co., Ltd. 
+ * Author: Minda Chen <minda.chen@starfivetech.com> + * + */ + +#ifndef PCIE_PLDA_COMMON_H +#define PCIE_PLDA_COMMON_H + +#define GEN_SETTINGS 0x80 +#define PCIE_PCI_IDS 0x9C +#define PCIE_WINROM 0xFC +#define PMSG_SUPPORT_RX 0x3F0 +#define PCI_MISC 0xB4 + +#define PLDA_EP_ENABLE 0 +#define PLDA_RP_ENABLE 1 + +#define IDS_CLASS_CODE_SHIFT 8 + +#define PREF_MEM_WIN_64_SUPPORT BIT(3) +#define PMSG_LTR_SUPPORT BIT(2) +#define PLDA_FUNCTION_DIS BIT(15) +#define PLDA_FUNC_NUM 4 +#define PLDA_PHY_FUNC_SHIFT 9 + +#define XR3PCI_ATR_AXI4_SLV0 0x800 +#define XR3PCI_ATR_SRC_ADDR_LOW 0x0 +#define XR3PCI_ATR_SRC_ADDR_HIGH 0x4 +#define XR3PCI_ATR_TRSL_ADDR_LOW 0x8 +#define XR3PCI_ATR_TRSL_ADDR_HIGH 0xc +#define XR3PCI_ATR_TRSL_PARAM 0x10 +#define XR3PCI_ATR_TABLE_OFFSET 0x20 +#define XR3PCI_ATR_MAX_TABLE_NUM 8 + +#define XR3PCI_ATR_SRC_WIN_SIZE_SHIFT 1 +#define XR3PCI_ATR_SRC_ADDR_MASK GENMASK(31, 12) +#define XR3PCI_ATR_TRSL_ADDR_MASK GENMASK(31, 12) +#define XR3PCI_ATR_TRSL_DIR BIT(22) +/* IDs used in the XR3PCI_ATR_TRSL_PARAM */ +#define XR3PCI_ATR_TRSLID_PCIE_MEMORY 0x0 +#define XR3PCI_ATR_TRSLID_PCIE_CONFIG 0x1 + +/** + * struct pcie_plda - PLDA PCIe controller state + * + * @reg_base: The base address of controller register space + * @cfg_base: The base address of configuration space + * @cfg_size: The size of configuration space + * @sec_busno: Secondary bus number. + * @atr_table_num: Total ATR table numbers. + */ +struct pcie_plda { + struct udevice *dev; + void __iomem *reg_base; + void __iomem *cfg_base; + phys_size_t cfg_size; + int sec_busno; + int atr_table_num; +}; + +int plda_pcie_config_read(const struct udevice *udev, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size); +int plda_pcie_config_write(struct udevice *udev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size); +int plda_pcie_set_atr_entry(struct pcie_plda *plda, phys_addr_t src_addr, + phys_addr_t trsl_addr, phys_size_t window_size, + int trsl_param); + +static inline void plda_pcie_enable_root_port(struct pcie_plda *plda) +{ + u32 value; + + value = readl(plda->reg_base + GEN_SETTINGS); + value |= PLDA_RP_ENABLE; + writel(value, plda->reg_base + GEN_SETTINGS); +} + +static inline void plda_pcie_set_standard_class(struct pcie_plda *plda) +{ + u32 value; + + value = readl(plda->reg_base + PCIE_PCI_IDS); + value &= 0xff; + value |= (PCI_CLASS_BRIDGE_PCI_NORMAL << IDS_CLASS_CODE_SHIFT); + writel(value, plda->reg_base + PCIE_PCI_IDS); +} + +static inline void plda_pcie_set_pref_win_64bit(struct pcie_plda *plda) +{ + u32 value; + + value = readl(plda->reg_base + PCIE_WINROM); + value |= PREF_MEM_WIN_64_SUPPORT; + writel(value, plda->reg_base + PCIE_WINROM); +} + +static inline void plda_pcie_disable_ltr(struct pcie_plda *plda) +{ + u32 value; + + value = readl(plda->reg_base + PMSG_SUPPORT_RX); + value &= ~PMSG_LTR_SUPPORT; + writel(value, plda->reg_base + PMSG_SUPPORT_RX); +} + +static inline void plda_pcie_disable_func(struct pcie_plda *plda) +{ + u32 value; + + value = readl(plda->reg_base + PCI_MISC); + value |= PLDA_FUNCTION_DIS; + writel(value, plda->reg_base + PCI_MISC); +} +#endif diff --git a/drivers/pci/pcie_rockchip.c b/drivers/pci/pcie_rockchip.c new file mode 100644 index 00000000000..19f9e58a640 --- /dev/null +++ b/drivers/pci/pcie_rockchip.c @@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip AXI PCIe host controller driver + * + * Copyright (c) 2016 Rockchip, Inc. 
+ * Copyright (c) 2020 Amarula Solutions(India) + * Copyright (c) 2020 Jagan Teki <jagan@amarulasolutions.com> + * Copyright (c) 2019 Patrick Wildt <patrick@blueri.se> + * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org> + * + * Bits taken from Linux Rockchip PCIe host controller. + */ + +#include <dm.h> +#include <dm/device_compat.h> +#include <generic-phy.h> +#include <pci.h> +#include <power/regulator.h> +#include <reset.h> +#include <asm-generic/gpio.h> +#include <linux/iopoll.h> + +#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) +#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) + +#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) +#define PCIE_CLIENT_BASE 0x0 +#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) +#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) +#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) +#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) +#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) +#define PCIE_CLIENT_BASIC_STATUS1 0x0048 +#define PCIE_CLIENT_LINK_STATUS_UP GENMASK(21, 20) +#define PCIE_CLIENT_LINK_STATUS_MASK GENMASK(21, 20) +#define PCIE_LINK_UP(x) \ + (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP) +#define PCIE_RC_NORMAL_BASE 0x800000 +#define PCIE_LM_BASE 0x900000 +#define PCIE_LM_VENDOR_ID (PCIE_LM_BASE + 0x44) +#define PCIE_LM_VENDOR_ROCKCHIP 0x1d87 +#define PCIE_LM_RCBAR (PCIE_LM_BASE + 0x300) +#define PCIE_LM_RCBARPIE BIT(19) +#define PCIE_LM_RCBARPIS BIT(20) +#define PCIE_RC_BASE 0xa00000 +#define PCIE_RC_CONFIG_DCR (PCIE_RC_BASE + 0x0c4) +#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 +#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 +#define PCIE_RC_PCIE_LCAP (PCIE_RC_BASE + 0x0cc) +#define PCIE_RC_PCIE_LCAP_APMS_L0S BIT(10) +#define PCIE_ATR_BASE 0xc00000 +#define PCIE_ATR_OB_ADDR0(i) (PCIE_ATR_BASE + 0x000 + (i) * 0x20) +#define PCIE_ATR_OB_ADDR1(i) (PCIE_ATR_BASE + 0x004 + (i) * 0x20) +#define PCIE_ATR_OB_DESC0(i) (PCIE_ATR_BASE + 0x008 + (i) * 0x20) +#define PCIE_ATR_OB_DESC1(i) (PCIE_ATR_BASE + 0x00c + (i) * 0x20) +#define PCIE_ATR_IB_ADDR0(i) (PCIE_ATR_BASE + 0x800 + (i) * 0x8) +#define PCIE_ATR_IB_ADDR1(i) (PCIE_ATR_BASE + 0x804 + (i) * 0x8) +#define PCIE_ATR_HDR_MEM 0x2 +#define PCIE_ATR_HDR_IO 0x6 +#define PCIE_ATR_HDR_CFG_TYPE0 0xa +#define PCIE_ATR_HDR_CFG_TYPE1 0xb +#define PCIE_ATR_HDR_RID BIT(23) + +#define PCIE_ATR_OB_REGION0_SIZE (32 * 1024 * 1024) +#define PCIE_ATR_OB_REGION_SIZE (1 * 1024 * 1024) + +struct rockchip_pcie { + fdt_addr_t axi_base; + fdt_addr_t apb_base; + int first_busno; + struct udevice *dev; + + /* resets */ + struct reset_ctl core_rst; + struct reset_ctl mgmt_rst; + struct reset_ctl mgmt_sticky_rst; + struct reset_ctl pipe_rst; + struct reset_ctl pm_rst; + struct reset_ctl pclk_rst; + struct reset_ctl aclk_rst; + + /* gpio */ + struct gpio_desc ep_gpio; + + /* vpcie regulators */ + struct udevice *vpcie12v; + struct udevice *vpcie3v3; + struct udevice *vpcie1v8; + struct udevice *vpcie0v9; + + /* phy */ + struct phy pcie_phy; +}; + +static int rockchip_pcie_rd_conf(const struct udevice *udev, pci_dev_t bdf, + uint offset, ulong *valuep, + enum pci_size_t size) +{ + struct rockchip_pcie *priv = dev_get_priv(udev); + unsigned int bus = PCI_BUS(bdf); + unsigned int dev = PCI_DEV(bdf); + int where = PCIE_ECAM_OFFSET(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset & ~0x3); + ulong value; + + if (bus == priv->first_busno && dev == 0) { + value = readl(priv->apb_base + PCIE_RC_NORMAL_BASE + where); + *valuep = pci_conv_32_to_size(value, offset, size); + 
return 0; + } + + if ((bus == priv->first_busno + 1) && dev == 0) { + value = readl(priv->axi_base + where); + *valuep = pci_conv_32_to_size(value, offset, size); + return 0; + } + + *valuep = pci_get_ff(size); + + return 0; +} + +static int rockchip_pcie_wr_conf(struct udevice *udev, pci_dev_t bdf, + uint offset, ulong value, + enum pci_size_t size) +{ + struct rockchip_pcie *priv = dev_get_priv(udev); + unsigned int bus = PCI_BUS(bdf); + unsigned int dev = PCI_DEV(bdf); + int where = PCIE_ECAM_OFFSET(PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), offset & ~0x3); + ulong old; + + if (bus == priv->first_busno && dev == 0) { + old = readl(priv->apb_base + PCIE_RC_NORMAL_BASE + where); + value = pci_conv_size_to_32(old, value, offset, size); + writel(value, priv->apb_base + PCIE_RC_NORMAL_BASE + where); + return 0; + } + + if ((bus == priv->first_busno + 1) && dev == 0) { + old = readl(priv->axi_base + where); + value = pci_conv_size_to_32(old, value, offset, size); + writel(value, priv->axi_base + where); + return 0; + } + + return 0; +} + +static int rockchip_pcie_atr_init(struct rockchip_pcie *priv) +{ + struct udevice *ctlr = pci_get_controller(priv->dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + u64 addr, size, offset; + u32 type; + int i, region; + + /* Use region 0 to map PCI configuration space. */ + writel(25 - 1, priv->apb_base + PCIE_ATR_OB_ADDR0(0)); + writel(0, priv->apb_base + PCIE_ATR_OB_ADDR1(0)); + writel(PCIE_ATR_HDR_CFG_TYPE0 | PCIE_ATR_HDR_RID, + priv->apb_base + PCIE_ATR_OB_DESC0(0)); + writel(0, priv->apb_base + PCIE_ATR_OB_DESC1(0)); + + for (i = 0; i < hose->region_count; i++) { + if (hose->regions[i].flags == PCI_REGION_SYS_MEMORY) + continue; + + if (hose->regions[i].flags == PCI_REGION_IO) + type = PCIE_ATR_HDR_IO; + else + type = PCIE_ATR_HDR_MEM; + + /* Only support identity mappings. */ + if (hose->regions[i].bus_start != + hose->regions[i].phys_start) + return -EINVAL; + + /* Only support mappings aligned on a region boundary. */ + addr = hose->regions[i].bus_start; + if (addr & (PCIE_ATR_OB_REGION_SIZE - 1)) + return -EINVAL; + + /* Mappings should lie between AXI and APB regions. */ + size = hose->regions[i].size; + if (addr < (u64)priv->axi_base + PCIE_ATR_OB_REGION0_SIZE) + return -EINVAL; + if (addr + size > (u64)priv->apb_base) + return -EINVAL; + + offset = addr - (u64)priv->axi_base - PCIE_ATR_OB_REGION0_SIZE; + region = 1 + (offset / PCIE_ATR_OB_REGION_SIZE); + while (size > 0) { + writel(32 - 1, + priv->apb_base + PCIE_ATR_OB_ADDR0(region)); + writel(0, priv->apb_base + PCIE_ATR_OB_ADDR1(region)); + writel(type | PCIE_ATR_HDR_RID, + priv->apb_base + PCIE_ATR_OB_DESC0(region)); + writel(0, priv->apb_base + PCIE_ATR_OB_DESC1(region)); + + addr += PCIE_ATR_OB_REGION_SIZE; + size -= PCIE_ATR_OB_REGION_SIZE; + region++; + } + } + + /* Passthrough inbound translations unmodified. 
*/ + writel(32 - 1, priv->apb_base + PCIE_ATR_IB_ADDR0(2)); + writel(0, priv->apb_base + PCIE_ATR_IB_ADDR1(2)); + + return 0; +} + +static int rockchip_pcie_init_port(struct udevice *dev) +{ + struct rockchip_pcie *priv = dev_get_priv(dev); + u32 cr, val, status; + int ret; + + if (dm_gpio_is_valid(&priv->ep_gpio)) + dm_gpio_set_value(&priv->ep_gpio, 0); + + ret = reset_assert(&priv->aclk_rst); + if (ret) { + dev_err(dev, "failed to assert aclk reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_assert(&priv->pclk_rst); + if (ret) { + dev_err(dev, "failed to assert pclk reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_assert(&priv->pm_rst); + if (ret) { + dev_err(dev, "failed to assert pm reset (ret=%d)\n", ret); + return ret; + } + + ret = generic_phy_init(&priv->pcie_phy); + if (ret) { + dev_err(dev, "failed to init phy (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = reset_assert(&priv->core_rst); + if (ret) { + dev_err(dev, "failed to assert core reset (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = reset_assert(&priv->mgmt_rst); + if (ret) { + dev_err(dev, "failed to assert mgmt reset (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = reset_assert(&priv->mgmt_sticky_rst); + if (ret) { + dev_err(dev, "failed to assert mgmt-sticky reset (ret=%d)\n", + ret); + goto err_exit_phy; + } + + ret = reset_assert(&priv->pipe_rst); + if (ret) { + dev_err(dev, "failed to assert pipe reset (ret=%d)\n", ret); + goto err_exit_phy; + } + + udelay(10); + + ret = reset_deassert(&priv->pm_rst); + if (ret) { + dev_err(dev, "failed to deassert pm reset (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = reset_deassert(&priv->aclk_rst); + if (ret) { + dev_err(dev, "failed to deassert aclk reset (ret=%d)\n", ret); + goto err_exit_phy; + } + + ret = reset_deassert(&priv->pclk_rst); + if (ret) { + dev_err(dev, "failed to deassert pclk reset (ret=%d)\n", ret); + goto err_exit_phy; + } + + /* Select GEN1 for now */ + cr = PCIE_CLIENT_GEN_SEL_1; + /* Set Root complex mode */ + cr |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC; + writel(cr, priv->apb_base + PCIE_CLIENT_CONFIG); + + ret = generic_phy_power_on(&priv->pcie_phy); + if (ret) { + dev_err(dev, "failed to power on phy (ret=%d)\n", ret); + goto err_power_off_phy; + } + + ret = reset_deassert(&priv->mgmt_sticky_rst); + if (ret) { + dev_err(dev, "failed to deassert mgmt-sticky reset (ret=%d)\n", + ret); + goto err_power_off_phy; + } + + ret = reset_deassert(&priv->core_rst); + if (ret) { + dev_err(dev, "failed to deassert core reset (ret=%d)\n", ret); + goto err_power_off_phy; + } + + ret = reset_deassert(&priv->mgmt_rst); + if (ret) { + dev_err(dev, "failed to deassert mgmt reset (ret=%d)\n", ret); + goto err_power_off_phy; + } + + ret = reset_deassert(&priv->pipe_rst); + if (ret) { + dev_err(dev, "failed to deassert pipe reset (ret=%d)\n", ret); + goto err_power_off_phy; + } + + /* Enable Gen1 training */ + writel(PCIE_CLIENT_LINK_TRAIN_ENABLE, + priv->apb_base + PCIE_CLIENT_CONFIG); + + if (dm_gpio_is_valid(&priv->ep_gpio)) + dm_gpio_set_value(&priv->ep_gpio, 1); + + ret = readl_poll_sleep_timeout + (priv->apb_base + PCIE_CLIENT_BASIC_STATUS1, + status, PCIE_LINK_UP(status), 20, 500 * 1000); + if (ret) { + dev_err(dev, "PCIe link training gen1 timeout!\n"); + goto err_power_off_phy; + } + + /* Initialize Root Complex registers. 
*/ + writel(PCIE_LM_VENDOR_ROCKCHIP, priv->apb_base + PCIE_LM_VENDOR_ID); + writel(PCI_CLASS_BRIDGE_PCI_NORMAL << 8, + priv->apb_base + PCIE_RC_BASE + PCI_CLASS_REVISION); + writel(PCIE_LM_RCBARPIE | PCIE_LM_RCBARPIS, + priv->apb_base + PCIE_LM_RCBAR); + + if (dev_read_bool(dev, "aspm-no-l0s")) { + val = readl(priv->apb_base + PCIE_RC_PCIE_LCAP); + val &= ~PCIE_RC_PCIE_LCAP_APMS_L0S; + writel(val, priv->apb_base + PCIE_RC_PCIE_LCAP); + } + + /* Configure Address Translation. */ + ret = rockchip_pcie_atr_init(priv); + if (ret) { + dev_err(dev, "PCIE-%d: ATR init failed\n", dev_seq(dev)); + goto err_power_off_phy; + } + + return 0; + +err_power_off_phy: + generic_phy_power_off(&priv->pcie_phy); +err_exit_phy: + generic_phy_exit(&priv->pcie_phy); + return ret; +} + +static int rockchip_pcie_set_vpcie(struct udevice *dev) +{ + struct rockchip_pcie *priv = dev_get_priv(dev); + int ret; + + ret = regulator_set_enable_if_allowed(priv->vpcie12v, true); + if (ret && ret != -ENOSYS) { + dev_err(dev, "failed to enable vpcie12v (ret=%d)\n", ret); + return ret; + } + + ret = regulator_set_enable_if_allowed(priv->vpcie3v3, true); + if (ret && ret != -ENOSYS) { + dev_err(dev, "failed to enable vpcie3v3 (ret=%d)\n", ret); + goto err_disable_12v; + } + + ret = regulator_set_enable_if_allowed(priv->vpcie1v8, true); + if (ret && ret != -ENOSYS) { + dev_err(dev, "failed to enable vpcie1v8 (ret=%d)\n", ret); + goto err_disable_3v3; + } + + ret = regulator_set_enable_if_allowed(priv->vpcie0v9, true); + if (ret && ret != -ENOSYS) { + dev_err(dev, "failed to enable vpcie0v9 (ret=%d)\n", ret); + goto err_disable_1v8; + } + + return 0; + +err_disable_1v8: + regulator_set_enable_if_allowed(priv->vpcie1v8, false); +err_disable_3v3: + regulator_set_enable_if_allowed(priv->vpcie3v3, false); +err_disable_12v: + regulator_set_enable_if_allowed(priv->vpcie12v, false); + return ret; +} + +static int rockchip_pcie_parse_dt(struct udevice *dev) +{ + struct rockchip_pcie *priv = dev_get_priv(dev); + int ret; + + priv->axi_base = dev_read_addr_name(dev, "axi-base"); + if (priv->axi_base == FDT_ADDR_T_NONE) + return -EINVAL; + + priv->apb_base = dev_read_addr_name(dev, "apb-base"); + if (priv->apb_base == FDT_ADDR_T_NONE) + return -EINVAL; + + ret = reset_get_by_name(dev, "core", &priv->core_rst); + if (ret) { + dev_err(dev, "failed to get core reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_get_by_name(dev, "mgmt", &priv->mgmt_rst); + if (ret) { + dev_err(dev, "failed to get mgmt reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_get_by_name(dev, "mgmt-sticky", &priv->mgmt_sticky_rst); + if (ret) { + dev_err(dev, "failed to get mgmt-sticky reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_get_by_name(dev, "pipe", &priv->pipe_rst); + if (ret) { + dev_err(dev, "failed to get pipe reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_get_by_name(dev, "pm", &priv->pm_rst); + if (ret) { + dev_err(dev, "failed to get pm reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_get_by_name(dev, "pclk", &priv->pclk_rst); + if (ret) { + dev_err(dev, "failed to get pclk reset (ret=%d)\n", ret); + return ret; + } + + ret = reset_get_by_name(dev, "aclk", &priv->aclk_rst); + if (ret) { + dev_err(dev, "failed to get aclk reset (ret=%d)\n", ret); + return ret; + } + + ret = device_get_supply_regulator(dev, "vpcie12v-supply", + &priv->vpcie12v); + if (ret && ret != -ENOENT) { + dev_err(dev, "failed to get vpcie12v supply (ret=%d)\n", ret); + return ret; + } + + ret = device_get_supply_regulator(dev, 
"vpcie3v3-supply", + &priv->vpcie3v3); + if (ret && ret != -ENOENT) { + dev_err(dev, "failed to get vpcie3v3 supply (ret=%d)\n", ret); + return ret; + } + + ret = device_get_supply_regulator(dev, "vpcie1v8-supply", + &priv->vpcie1v8); + if (ret && ret != -ENOENT) { + dev_err(dev, "failed to get vpcie1v8 supply (ret=%d)\n", ret); + return ret; + } + + ret = device_get_supply_regulator(dev, "vpcie0v9-supply", + &priv->vpcie0v9); + if (ret && ret != -ENOENT) { + dev_err(dev, "failed to get vpcie0v9 supply (ret=%d)\n", ret); + return ret; + } + + ret = generic_phy_get_by_index(dev, 0, &priv->pcie_phy); + if (ret) { + dev_err(dev, "failed to get pcie-phy (ret=%d)\n", ret); + return ret; + } + + ret = gpio_request_by_name(dev, "ep-gpios", 0, + &priv->ep_gpio, GPIOD_IS_OUT); + if (ret) { + dev_err(dev, "failed to find ep-gpios property\n"); + return ret; + } + + return 0; +} + +static int rockchip_pcie_probe(struct udevice *dev) +{ + struct rockchip_pcie *priv = dev_get_priv(dev); + struct udevice *ctlr = pci_get_controller(dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + int ret; + + priv->first_busno = dev_seq(dev); + priv->dev = dev; + + ret = rockchip_pcie_parse_dt(dev); + if (ret) + return ret; + + ret = rockchip_pcie_set_vpcie(dev); + if (ret) + goto err_gpio_free; + + ret = rockchip_pcie_init_port(dev); + if (ret) + goto err_disable_vpcie; + + dev_info(dev, "PCIE-%d: Link up (Bus%d)\n", + dev_seq(dev), hose->first_busno); + + return 0; + +err_disable_vpcie: + regulator_set_enable_if_allowed(priv->vpcie0v9, false); + regulator_set_enable_if_allowed(priv->vpcie1v8, false); + regulator_set_enable_if_allowed(priv->vpcie3v3, false); + regulator_set_enable_if_allowed(priv->vpcie12v, false); +err_gpio_free: + if (dm_gpio_is_valid(&priv->ep_gpio)) + dm_gpio_free(dev, &priv->ep_gpio); + return ret; +} + +static const struct dm_pci_ops rockchip_pcie_ops = { + .read_config = rockchip_pcie_rd_conf, + .write_config = rockchip_pcie_wr_conf, +}; + +static const struct udevice_id rockchip_pcie_ids[] = { + { .compatible = "rockchip,rk3399-pcie" }, + { } +}; + +U_BOOT_DRIVER(rockchip_pcie) = { + .name = "rockchip_pcie", + .id = UCLASS_PCI, + .of_match = rockchip_pcie_ids, + .ops = &rockchip_pcie_ops, + .probe = rockchip_pcie_probe, + .priv_auto = sizeof(struct rockchip_pcie), +}; diff --git a/drivers/pci/pcie_starfive_jh7110.c b/drivers/pci/pcie_starfive_jh7110.c new file mode 100644 index 00000000000..569fbfd35c8 --- /dev/null +++ b/drivers/pci/pcie_starfive_jh7110.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * StarFive PLDA PCIe host controller driver + * + * Copyright (C) 2023 StarFive Technology Co., Ltd. 
+ * Author: Mason Huo <mason.huo@starfivetech.com> + * + */ + +#include <clk.h> +#include <dm.h> +#include <pci.h> +#include <pci_ids.h> +#include <power-domain.h> +#include <regmap.h> +#include <reset.h> +#include <syscon.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <asm-generic/gpio.h> +#include <dm/device_compat.h> +#include <dm/pinctrl.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include "pcie_plda_common.h" + +/* system control */ +#define STG_SYSCON_K_RP_NEP_MASK BIT(8) +#define STG_SYSCON_AXI4_SLVL_ARFUNC_MASK GENMASK(22, 8) +#define STG_SYSCON_AXI4_SLVL_ARFUNC_SHIFT 8 +#define STG_SYSCON_AXI4_SLVL_AWFUNC_MASK GENMASK(14, 0) +#define STG_SYSCON_CLKREQ_MASK BIT(22) +#define STG_SYSCON_CKREF_SRC_SHIFT 18 +#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18) + +DECLARE_GLOBAL_DATA_PTR; + +struct starfive_pcie { + struct pcie_plda plda; + struct clk_bulk clks; + struct reset_ctl_bulk rsts; + struct gpio_desc reset_gpio; + struct regmap *regmap; + u32 stg_arfun; + u32 stg_awfun; + u32 stg_rp_nep; +}; + +static int starfive_pcie_atr_init(struct starfive_pcie *priv) +{ + struct udevice *ctlr = pci_get_controller(priv->plda.dev); + struct pci_controller *hose = dev_get_uclass_priv(ctlr); + int i, ret; + + /* + * As the two host bridges in JH7110 soc have the same default + * address translation table, this cause the second root port can't + * access it's host bridge config space correctly. + * To workaround, config the ATR of host bridge config space by SW. + */ + + ret = plda_pcie_set_atr_entry(&priv->plda, + (phys_addr_t)priv->plda.cfg_base, 0, + priv->plda.cfg_size, + XR3PCI_ATR_TRSLID_PCIE_CONFIG); + if (ret) + return ret; + + for (i = 0; i < hose->region_count; i++) { + if (hose->regions[i].flags == PCI_REGION_SYS_MEMORY) + continue; + + /* Only support identity mappings. 
*/ + if (hose->regions[i].bus_start != + hose->regions[i].phys_start) + return -EINVAL; + + ret = plda_pcie_set_atr_entry(&priv->plda, + hose->regions[i].phys_start, + hose->regions[i].bus_start, + hose->regions[i].size, + XR3PCI_ATR_TRSLID_PCIE_MEMORY); + if (ret) + return ret; + } + + return 0; +} + +static int starfive_pcie_get_syscon(struct udevice *dev) +{ + struct starfive_pcie *priv = dev_get_priv(dev); + struct udevice *syscon; + struct ofnode_phandle_args syscfg_phandle; + u32 cells[4]; + int ret; + + /* get corresponding syscon phandle */ + ret = dev_read_phandle_with_args(dev, "starfive,stg-syscon", NULL, 0, 0, + &syscfg_phandle); + + if (ret < 0) { + dev_err(dev, "Can't get syscfg phandle: %d\n", ret); + return ret; + } + + ret = uclass_get_device_by_ofnode(UCLASS_SYSCON, syscfg_phandle.node, + &syscon); + if (ret) { + dev_err(dev, "Unable to find syscon device (%d)\n", ret); + return ret; + } + + priv->regmap = syscon_get_regmap(syscon); + if (!priv->regmap) { + dev_err(dev, "Unable to find regmap\n"); + return -ENODEV; + } + + /* get syscon register offset */ + ret = dev_read_u32_array(dev, "starfive,stg-syscon", + cells, ARRAY_SIZE(cells)); + if (ret) { + dev_err(dev, "Get syscon register err %d\n", ret); + return -EINVAL; + } + + dev_dbg(dev, "Get syscon values: %x, %x, %x\n", + cells[1], cells[2], cells[3]); + priv->stg_arfun = cells[1]; + priv->stg_awfun = cells[2]; + priv->stg_rp_nep = cells[3]; + + return 0; +} + +static int starfive_pcie_parse_dt(struct udevice *dev) +{ + struct starfive_pcie *priv = dev_get_priv(dev); + int ret; + + priv->plda.reg_base = (void *)dev_read_addr_name(dev, "reg"); + if (priv->plda.reg_base == (void __iomem *)FDT_ADDR_T_NONE) { + dev_err(dev, "Missing required reg address range\n"); + return -EINVAL; + } + + priv->plda.cfg_base = + (void *)dev_read_addr_size_name(dev, + "config", + &priv->plda.cfg_size); + if (priv->plda.cfg_base == (void __iomem *)FDT_ADDR_T_NONE) { + dev_err(dev, "Missing required config address range"); + return -EINVAL; + } + + ret = starfive_pcie_get_syscon(dev); + if (ret) { + dev_err(dev, "Can't get syscon: %d\n", ret); + return ret; + } + + ret = reset_get_bulk(dev, &priv->rsts); + if (ret) { + dev_err(dev, "Can't get reset: %d\n", ret); + return ret; + } + + ret = clk_get_bulk(dev, &priv->clks); + if (ret) { + dev_err(dev, "Can't get clock: %d\n", ret); + return ret; + } + + ret = gpio_request_by_name(dev, "reset-gpios", 0, &priv->reset_gpio, + GPIOD_IS_OUT); + if (ret) { + dev_err(dev, "Can't get reset-gpio: %d\n", ret); + return ret; + } + + if (!dm_gpio_is_valid(&priv->reset_gpio)) { + dev_err(dev, "reset-gpio is not valid\n"); + return -EINVAL; + } + return 0; +} + +static int starfive_pcie_init_port(struct udevice *dev) +{ + int ret, i; + struct starfive_pcie *priv = dev_get_priv(dev); + struct pcie_plda *plda = &priv->plda; + + ret = clk_enable_bulk(&priv->clks); + if (ret) { + dev_err(dev, "Failed to enable clks (ret=%d)\n", ret); + return ret; + } + + ret = reset_deassert_bulk(&priv->rsts); + if (ret) { + dev_err(dev, "Failed to deassert resets (ret=%d)\n", ret); + goto err_deassert_clk; + } + + dm_gpio_set_value(&priv->reset_gpio, 1); + /* Disable physical functions except #0 */ + for (i = 1; i < PLDA_FUNC_NUM; i++) { + regmap_update_bits(priv->regmap, + priv->stg_arfun, + STG_SYSCON_AXI4_SLVL_ARFUNC_MASK, + (i << PLDA_PHY_FUNC_SHIFT) << + STG_SYSCON_AXI4_SLVL_ARFUNC_SHIFT); + regmap_update_bits(priv->regmap, + priv->stg_awfun, + STG_SYSCON_AXI4_SLVL_AWFUNC_MASK, + i << PLDA_PHY_FUNC_SHIFT); + + 
plda_pcie_disable_func(plda); + } + + /* Disable physical functions */ + regmap_update_bits(priv->regmap, + priv->stg_arfun, + STG_SYSCON_AXI4_SLVL_ARFUNC_MASK, + 0); + regmap_update_bits(priv->regmap, + priv->stg_awfun, + STG_SYSCON_AXI4_SLVL_AWFUNC_MASK, + 0); + + plda_pcie_enable_root_port(plda); + + /* PCIe PCI Standard Configuration Identification Settings. */ + plda_pcie_set_standard_class(plda); + + /* + * The LTR message forwarding of PCIe Message Reception was set by core + * as default, but the forward id & addr are also need to be reset. + * If we do not disable LTR message forwarding here, or set a legal + * forwarding address, the kernel will get stuck after this driver probe. + * To workaround, disable the LTR message forwarding support on + * PCIe Message Reception. + */ + plda_pcie_disable_ltr(plda); + + /* Prefetchable memory window 64-bit addressing support */ + plda_pcie_set_pref_win_64bit(plda); + starfive_pcie_atr_init(priv); + + dm_gpio_set_value(&priv->reset_gpio, 0); + /* Ensure that PERST in default at least 300 ms */ + mdelay(300); + + return 0; + +err_deassert_clk: + clk_disable_bulk(&priv->clks); + return ret; +} + +static int starfive_pcie_probe(struct udevice *dev) +{ + struct starfive_pcie *priv = dev_get_priv(dev); + int ret; + + priv->plda.atr_table_num = 0; + priv->plda.dev = dev; + + ret = starfive_pcie_parse_dt(dev); + if (ret) + return ret; + + regmap_update_bits(priv->regmap, + priv->stg_rp_nep, + STG_SYSCON_K_RP_NEP_MASK, + STG_SYSCON_K_RP_NEP_MASK); + + regmap_update_bits(priv->regmap, + priv->stg_awfun, + STG_SYSCON_CKREF_SRC_MASK, + 2 << STG_SYSCON_CKREF_SRC_SHIFT); + + regmap_update_bits(priv->regmap, + priv->stg_awfun, + STG_SYSCON_CLKREQ_MASK, + STG_SYSCON_CLKREQ_MASK); + + ret = starfive_pcie_init_port(dev); + if (ret) + return ret; + + dev_err(dev, "Starfive PCIe bus probed.\n"); + + return 0; +} + +static const struct dm_pci_ops starfive_pcie_ops = { + .read_config = plda_pcie_config_read, + .write_config = plda_pcie_config_write, +}; + +static const struct udevice_id starfive_pcie_ids[] = { + { .compatible = "starfive,jh7110-pcie" }, + { } +}; + +U_BOOT_DRIVER(starfive_pcie_drv) = { + .name = "starfive_7110_pcie", + .id = UCLASS_PCI, + .of_match = starfive_pcie_ids, + .ops = &starfive_pcie_ops, + .probe = starfive_pcie_probe, + .priv_auto = sizeof(struct starfive_pcie), +}; diff --git a/drivers/pci/pcie_uniphier.c b/drivers/pci/pcie_uniphier.c new file mode 100644 index 00000000000..d1170b576bc --- /dev/null +++ b/drivers/pci/pcie_uniphier.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pcie_uniphier.c - Socionext UniPhier PCIe driver + * Copyright 2019-2021 Socionext, Inc. 
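The UniPhier controller below is DesignWare-based: uniphier_pcie_conf_address() reaches devices behind the root port by programming an outbound iATU window whose lower target address encodes the BDF. A stand-alone illustration of that encoding (editor's sketch, not part of the patch; FIELD_PREP is replaced by the equivalent plain shifts so it compiles without kernel headers):

#include <assert.h>
#include <stdint.h>

/* Same field layout as the PCIE_ATU_BUS/DEV/FUNC macros in this file */
#define ATU_BUS(x)	((uint32_t)(x) << 24)	/* bits 31:24 */
#define ATU_DEV(x)	((uint32_t)(x) << 19)	/* bits 23:19 */
#define ATU_FUNC(x)	((uint32_t)(x) << 16)	/* bits 18:16 */

int main(void)
{
	/* bus 1, device 0, function 0 -> iATU lower target 0x01000000 */
	assert((ATU_BUS(1) | ATU_DEV(0) | ATU_FUNC(0)) == 0x01000000);
	return 0;
}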
+ */
+
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <generic-phy.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <pci.h>
+#include <reset.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* DBI registers */
+#define PCIE_LINK_STATUS_REG		0x0080
+#define PCIE_LINK_STATUS_WIDTH_MASK	GENMASK(25, 20)
+#define PCIE_LINK_STATUS_SPEED_MASK	GENMASK(19, 16)
+
+#define PCIE_MISC_CONTROL_1_OFF		0x08BC
+#define PCIE_DBI_RO_WR_EN		BIT(0)
+
+/* DBI iATU registers */
+#define PCIE_ATU_VIEWPORT		0x0900
+#define PCIE_ATU_REGION_INBOUND		BIT(31)
+#define PCIE_ATU_REGION_OUTBOUND	0
+#define PCIE_ATU_REGION_INDEX_MASK	GENMASK(3, 0)
+
+#define PCIE_ATU_CR1			0x0904
+#define PCIE_ATU_TYPE_MEM		0
+#define PCIE_ATU_TYPE_IO		2
+#define PCIE_ATU_TYPE_CFG0		4
+#define PCIE_ATU_TYPE_CFG1		5
+
+#define PCIE_ATU_CR2			0x0908
+#define PCIE_ATU_ENABLE			BIT(31)
+#define PCIE_ATU_MATCH_MODE		BIT(30)
+#define PCIE_ATU_BAR_NUM_MASK		GENMASK(10, 8)
+
+#define PCIE_ATU_LOWER_BASE		0x090C
+#define PCIE_ATU_UPPER_BASE		0x0910
+#define PCIE_ATU_LIMIT			0x0914
+#define PCIE_ATU_LOWER_TARGET		0x0918
+#define PCIE_ATU_BUS(x)			FIELD_PREP(GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x)			FIELD_PREP(GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x)		FIELD_PREP(GENMASK(18, 16), x)
+#define PCIE_ATU_UPPER_TARGET		0x091C
+
+/* Link Glue registers */
+#define PCL_PINCTRL0			0x002c
+#define PCL_PERST_PLDN_REGEN		BIT(12)
+#define PCL_PERST_NOE_REGEN		BIT(11)
+#define PCL_PERST_OUT_REGEN		BIT(8)
+#define PCL_PERST_PLDN_REGVAL		BIT(4)
+#define PCL_PERST_NOE_REGVAL		BIT(3)
+#define PCL_PERST_OUT_REGVAL		BIT(0)
+
+#define PCL_MODE			0x8000
+#define PCL_MODE_REGEN			BIT(8)
+#define PCL_MODE_REGVAL			BIT(0)
+
+#define PCL_APP_READY_CTRL		0x8008
+#define PCL_APP_LTSSM_ENABLE		BIT(0)
+
+#define PCL_APP_PM0			0x8078
+#define PCL_SYS_AUX_PWR_DET		BIT(8)
+
+#define PCL_STATUS_LINK			0x8140
+#define PCL_RDLH_LINK_UP		BIT(1)
+#define PCL_XMLH_LINK_UP		BIT(0)
+
+#define LINK_UP_TIMEOUT_MS		100
+
+struct uniphier_pcie_priv {
+	void *base;
+	void *dbi_base;
+	void *cfg_base;
+	fdt_size_t cfg_size;
+	struct fdt_resource link_res;
+	struct fdt_resource dbi_res;
+	struct fdt_resource cfg_res;
+
+	struct clk clk;
+	struct reset_ctl rst;
+	struct phy phy;
+
+	struct pci_region io;
+	struct pci_region mem;
+};
+
+static int pcie_dw_get_link_speed(struct uniphier_pcie_priv *priv)
+{
+	u32 val = readl(priv->dbi_base + PCIE_LINK_STATUS_REG);
+
+	return FIELD_GET(PCIE_LINK_STATUS_SPEED_MASK, val);
+}
+
+static int pcie_dw_get_link_width(struct uniphier_pcie_priv *priv)
+{
+	u32 val = readl(priv->dbi_base + PCIE_LINK_STATUS_REG);
+
+	return FIELD_GET(PCIE_LINK_STATUS_WIDTH_MASK, val);
+}
+
+static void pcie_dw_prog_outbound_atu(struct uniphier_pcie_priv *priv,
+				      int index, int type, u64 cpu_addr,
+				      u64 pci_addr, u32 size)
+{
+	writel(PCIE_ATU_REGION_OUTBOUND
+	       | FIELD_PREP(PCIE_ATU_REGION_INDEX_MASK, index),
+	       priv->dbi_base + PCIE_ATU_VIEWPORT);
+	writel(lower_32_bits(cpu_addr),
+	       priv->dbi_base + PCIE_ATU_LOWER_BASE);
+	writel(upper_32_bits(cpu_addr),
+	       priv->dbi_base + PCIE_ATU_UPPER_BASE);
+	writel(lower_32_bits(cpu_addr + size - 1),
+	       priv->dbi_base + PCIE_ATU_LIMIT);
+	writel(lower_32_bits(pci_addr),
+	       priv->dbi_base + PCIE_ATU_LOWER_TARGET);
+	writel(upper_32_bits(pci_addr),
+	       priv->dbi_base + PCIE_ATU_UPPER_TARGET);
+
+	writel(type, priv->dbi_base + PCIE_ATU_CR1);
+	writel(PCIE_ATU_ENABLE, priv->dbi_base + PCIE_ATU_CR2);
+}
+
+static int uniphier_pcie_addr_valid(pci_dev_t bdf, int first_busno)
+{
+	/* accept only device {0,1} on first bus */
+	if ((PCI_BUS(bdf) != first_busno) || (PCI_DEV(bdf) > 1))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int uniphier_pcie_conf_address(const struct udevice *dev, pci_dev_t bdf,
+				      uint offset, void **paddr)
+{
+	struct uniphier_pcie_priv *priv = dev_get_priv(dev);
+	u32 busdev;
+	int seq = dev_seq(dev);
+	int ret;
+
+	ret = uniphier_pcie_addr_valid(bdf, seq);
+	if (ret)
+		return ret;
+
+	if ((PCI_BUS(bdf) == seq) && !PCI_DEV(bdf)) {
+		*paddr = (void *)(priv->dbi_base + offset);
+		return 0;
+	}
+
+	busdev = PCIE_ATU_BUS(PCI_BUS(bdf) - seq)
+		| PCIE_ATU_DEV(PCI_DEV(bdf))
+		| PCIE_ATU_FUNC(PCI_FUNC(bdf));
+
+	pcie_dw_prog_outbound_atu(priv, 0,
+				  PCIE_ATU_TYPE_CFG0, (u64)priv->cfg_base,
+				  busdev, priv->cfg_size);
+	*paddr = (void *)(priv->cfg_base + offset);
+
+	return 0;
+}
+
+static int uniphier_pcie_read_config(const struct udevice *dev, pci_dev_t bdf,
+				     uint offset, ulong *valp,
+				     enum pci_size_t size)
+{
+	return pci_generic_mmap_read_config(dev, uniphier_pcie_conf_address,
+					    bdf, offset, valp, size);
+}
+
+static int uniphier_pcie_write_config(struct udevice *dev, pci_dev_t bdf,
+				      uint offset, ulong val,
+				      enum pci_size_t size)
+{
+	return pci_generic_mmap_write_config(dev, uniphier_pcie_conf_address,
+					     bdf, offset, val, size);
+}
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
+				       bool enable)
+{
+	u32 val;
+
+	val = readl(priv->base + PCL_APP_READY_CTRL);
+	if (enable)
+		val |= PCL_APP_LTSSM_ENABLE;
+	else
+		val &= ~PCL_APP_LTSSM_ENABLE;
+	writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static int uniphier_pcie_link_up(struct uniphier_pcie_priv *priv)
+{
+	u32 val, mask;
+
+	val = readl(priv->base + PCL_STATUS_LINK);
+	mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
+
+	return (val & mask) == mask;
+}
+
+static int uniphier_pcie_wait_link(struct uniphier_pcie_priv *priv)
+{
+	unsigned long timeout;
+
+	timeout = get_timer(0) + LINK_UP_TIMEOUT_MS;
+
+	while (get_timer(0) < timeout) {
+		if (uniphier_pcie_link_up(priv))
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int uniphier_pcie_establish_link(struct uniphier_pcie_priv *priv)
+{
+	if (uniphier_pcie_link_up(priv))
+		return 0;
+
+	uniphier_pcie_ltssm_enable(priv, true);
+
+	return uniphier_pcie_wait_link(priv);
+}
+
+static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
+{
+	u32 val;
+
+	/* set RC mode */
+	val = readl(priv->base + PCL_MODE);
+	val |= PCL_MODE_REGEN;
+	val &= ~PCL_MODE_REGVAL;
+	writel(val, priv->base + PCL_MODE);
+
+	/* use auxiliary power detection */
+	val = readl(priv->base + PCL_APP_PM0);
+	val |= PCL_SYS_AUX_PWR_DET;
+	writel(val, priv->base + PCL_APP_PM0);
+
+	/* assert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+		 | PCL_PERST_PLDN_REGVAL);
+	val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+		| PCL_PERST_PLDN_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+
+	uniphier_pcie_ltssm_enable(priv, false);
+
+	mdelay(100);
+
+	/* deassert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static void uniphier_pcie_setup_rc(struct uniphier_pcie_priv *priv,
+				   struct pci_controller *hose)
+{
+	/* Store the I/O and MEM window settings for later use by the ATU */
+	priv->io.phys_start = hose->regions[0].phys_start; /* IO base */
+	priv->io.bus_start = hose->regions[0].bus_start; /* IO_bus_addr */
+	priv->io.size = hose->regions[0].size; /* IO size */
+	priv->mem.phys_start = hose->regions[1].phys_start; /* MEM base */
+	priv->mem.bus_start = hose->regions[1].bus_start; /* MEM_bus_addr */
+	priv->mem.size = hose->regions[1].size; /* MEM size */
+
+	/* outbound: IO */
+	pcie_dw_prog_outbound_atu(priv, 0,
+				  PCIE_ATU_TYPE_IO, priv->io.phys_start,
+				  priv->io.bus_start, priv->io.size);
+
+	/* outbound: MEM */
+	pcie_dw_prog_outbound_atu(priv, 1,
+				  PCIE_ATU_TYPE_MEM, priv->mem.phys_start,
+				  priv->mem.bus_start, priv->mem.size);
+}
+
+static int uniphier_pcie_probe(struct udevice *dev)
+{
+	struct uniphier_pcie_priv *priv = dev_get_priv(dev);
+	struct udevice *ctlr = pci_get_controller(dev);
+	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
+	int ret;
+
+	priv->base = map_physmem(priv->link_res.start,
+				 fdt_resource_size(&priv->link_res),
+				 MAP_NOCACHE);
+	priv->dbi_base = map_physmem(priv->dbi_res.start,
+				     fdt_resource_size(&priv->dbi_res),
+				     MAP_NOCACHE);
+	priv->cfg_size = fdt_resource_size(&priv->cfg_res);
+	priv->cfg_base = map_physmem(priv->cfg_res.start,
+				     priv->cfg_size, MAP_NOCACHE);
+
+	ret = clk_enable(&priv->clk);
+	if (ret) {
+		dev_err(dev, "Failed to enable clk: %d\n", ret);
+		return ret;
+	}
+	ret = reset_deassert(&priv->rst);
+	if (ret) {
+		dev_err(dev, "Failed to deassert reset: %d\n", ret);
+		goto out_clk_release;
+	}
+
+	ret = generic_phy_init(&priv->phy);
+	if (ret) {
+		dev_err(dev, "Failed to initialize phy: %d\n", ret);
+		goto out_reset_release;
+	}
+
+	ret = generic_phy_power_on(&priv->phy);
+	if (ret) {
+		dev_err(dev, "Failed to power on phy: %d\n", ret);
+		goto out_phy_exit;
+	}
+
+	uniphier_pcie_init_rc(priv);
+
+	/* set DBI to read only */
+	writel(0, priv->dbi_base + PCIE_MISC_CONTROL_1_OFF);
+
+	uniphier_pcie_setup_rc(priv, hose);
+
+	if (uniphier_pcie_establish_link(priv)) {
+		printf("PCIE-%d: Link down\n", dev_seq(dev));
+	} else {
+		printf("PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n",
+		       dev_seq(dev), pcie_dw_get_link_speed(priv),
+		       pcie_dw_get_link_width(priv), hose->first_busno);
+	}
+
+	return 0;
+
+out_phy_exit:
+	generic_phy_exit(&priv->phy);
+out_reset_release:
+	reset_release_all(&priv->rst, 1);
+out_clk_release:
+	clk_release_all(&priv->clk, 1);
+
+	return ret;
+}
+
+static int uniphier_pcie_of_to_plat(struct udevice *dev)
+{
+	struct uniphier_pcie_priv *priv = dev_get_priv(dev);
+	const void *fdt = gd->fdt_blob;
+	int node = dev_of_offset(dev);
+	int ret;
+
+	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+				     "link", &priv->link_res);
+	if (ret) {
+		dev_err(dev, "Failed to get link regs: %d\n", ret);
+		return ret;
+	}
+
+	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+				     "dbi", &priv->dbi_res);
+	if (ret) {
+		dev_err(dev, "Failed to get dbi regs: %d\n", ret);
+		return ret;
+	}
+
+	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+				     "config", &priv->cfg_res);
+	if (ret) {
+		dev_err(dev, "Failed to get config regs: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_get_by_index(dev, 0, &priv->clk);
+	if (ret) {
+		dev_err(dev, "Failed to get clocks property: %d\n", ret);
+		return ret;
+	}
+
+	ret = reset_get_by_index(dev, 0, &priv->rst);
+	if (ret) {
+		dev_err(dev, "Failed to get resets property: %d\n", ret);
+		return ret;
+	}
+
+	ret = generic_phy_get_by_index(dev, 0, &priv->phy);
+	if (ret) {
+		dev_err(dev, "Failed to get phy property: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dm_pci_ops uniphier_pcie_ops = {
+	.read_config = uniphier_pcie_read_config,
+	.write_config = uniphier_pcie_write_config,
+};
+
+static const struct udevice_id uniphier_pcie_ids[] = {
+	{ .compatible = "socionext,uniphier-pcie", },
+	{ /* Sentinel */ }
+};
+
+U_BOOT_DRIVER(pcie_uniphier) = {
+	.name = "uniphier-pcie",
+	.id = UCLASS_PCI,
+	.of_match = uniphier_pcie_ids,
+	.probe = uniphier_pcie_probe,
+	.ops = &uniphier_pcie_ops,
+	.of_to_plat = uniphier_pcie_of_to_plat,
+	.priv_auto = sizeof(struct uniphier_pcie_priv),
+};
diff --git a/drivers/pci/pcie_xilinx.c b/drivers/pci/pcie_xilinx.c
new file mode 100644
index 00000000000..a674ab04bee
--- /dev/null
+++ b/drivers/pci/pcie_xilinx.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXI Bridge for PCI Express Driver
+ *
+ * Copyright (C) 2016 Imagination Technologies
+ */
+
+#include <dm.h>
+#include <pci.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+/**
+ * struct xilinx_pcie - Xilinx PCIe controller state
+ * @cfg_base: The base address of memory mapped configuration space
+ */
+struct xilinx_pcie {
+	void *cfg_base;
+};
+
+/* Register definitions */
+#define XILINX_PCIE_REG_PSCR		0x144
+#define XILINX_PCIE_REG_PSCR_LNKUP	BIT(11)
+#define XILINX_PCIE_REG_RPSC		0x148
+#define XILINX_PCIE_REG_RPSC_BEN	BIT(0)
+
+/**
+ * pcie_xilinx_link_up() - Check whether the PCIe link is up
+ * @pcie: Pointer to the PCI controller state
+ *
+ * Checks whether the PCIe link for the given device is up or down.
+ *
+ * Return: true if the link is up, else false
+ */
+static bool pcie_xilinx_link_up(struct xilinx_pcie *pcie)
+{
+	uint32_t pscr = __raw_readl(pcie->cfg_base + XILINX_PCIE_REG_PSCR);
+
+	return pscr & XILINX_PCIE_REG_PSCR_LNKUP;
+}
+
+/**
+ * pcie_xilinx_config_address() - Calculate the address of a config access
+ * @udev: Pointer to the PCI bus
+ * @bdf: Identifies the PCIe device to access
+ * @offset: The offset into the device's configuration space
+ * @paddress: Pointer to the pointer to write the calculated address to
+ *
+ * Calculates the address that should be accessed to perform a PCIe
+ * configuration space access for a given device identified by the PCIe
+ * controller device @udev and the bus, device & function numbers in @bdf. If
+ * access to the device is not valid then the function will return an error
+ * code. Otherwise the address to access will be written to the pointer pointed
+ * to by @paddress.
+ *
+ * Return: 0 on success, else -ENODEV
+ */
+static int pcie_xilinx_config_address(const struct udevice *udev, pci_dev_t bdf,
+				      uint offset, void **paddress)
+{
+	struct xilinx_pcie *pcie = dev_get_priv(udev);
+	unsigned int bus = PCI_BUS(bdf);
+	unsigned int dev = PCI_DEV(bdf);
+	unsigned int func = PCI_FUNC(bdf);
+	void *addr;
+
+	if ((bus > 0) && !pcie_xilinx_link_up(pcie))
+		return -ENODEV;
+
+	/*
+	 * Busses 0 (host-PCIe bridge) & 1 (its immediate child) are
+	 * limited to a single device each.
+	 */
+	if ((bus < 2) && (dev > 0))
+		return -ENODEV;
+
+	addr = pcie->cfg_base;
+	addr += PCIE_ECAM_OFFSET(bus, dev, func, offset);
+	*paddress = addr;
+
+	return 0;
+}
+
+/**
+ * pcie_xilinx_read_config() - Read from configuration space
+ * @bus: Pointer to the PCI bus
+ * @bdf: Identifies the PCIe device to access
+ * @offset: The offset into the device's configuration space
+ * @valuep: A pointer at which to store the read value
+ * @size: Indicates the size of access to perform
+ *
+ * Read a value of size @size from offset @offset within the configuration
+ * space of the device identified by the bus, device & function numbers in @bdf
+ * on the PCI bus @bus.
+ *
+ * Return: 0 on success, else -ENODEV or -EINVAL
+ */
+static int pcie_xilinx_read_config(const struct udevice *bus, pci_dev_t bdf,
+				   uint offset, ulong *valuep,
+				   enum pci_size_t size)
+{
+	return pci_generic_mmap_read_config(bus, pcie_xilinx_config_address,
+					    bdf, offset, valuep, size);
+}
+
+/**
+ * pcie_xilinx_write_config() - Write to configuration space
+ * @bus: Pointer to the PCI bus
+ * @bdf: Identifies the PCIe device to access
+ * @offset: The offset into the device's configuration space
+ * @value: The value to write
+ * @size: Indicates the size of access to perform
+ *
+ * Write the value @value of size @size to offset @offset within the
+ * configuration space of the device identified by the bus, device & function
+ * numbers in @bdf on the PCI bus @bus.
+ *
+ * Return: 0 on success, else -ENODEV or -EINVAL
+ */
+static int pcie_xilinx_write_config(struct udevice *bus, pci_dev_t bdf,
+				    uint offset, ulong value,
+				    enum pci_size_t size)
+{
+	return pci_generic_mmap_write_config(bus, pcie_xilinx_config_address,
+					     bdf, offset, value, size);
+}
+
+/**
+ * pcie_xilinx_of_to_plat() - Translate from DT to device state
+ * @dev: A pointer to the device being operated on
+ *
+ * Translate relevant data from the device tree pertaining to device @dev into
+ * state that the driver will later make use of. This state is stored in the
+ * device's private data structure.
+ *
+ * Return: 0 on success, else -EINVAL
+ */
+static int pcie_xilinx_of_to_plat(struct udevice *dev)
+{
+	struct xilinx_pcie *pcie = dev_get_priv(dev);
+	fdt_addr_t addr;
+	fdt_size_t size;
+	u32 rpsc;
+
+	addr = dev_read_addr_size(dev, &size);
+	if (addr == FDT_ADDR_T_NONE)
+		return -EINVAL;
+
+	pcie->cfg_base = devm_ioremap(dev, addr, size);
+	if (IS_ERR(pcie->cfg_base))
+		return PTR_ERR(pcie->cfg_base);
+
+	/* Set the Bridge Enable bit */
+	rpsc = __raw_readl(pcie->cfg_base + XILINX_PCIE_REG_RPSC);
+	rpsc |= XILINX_PCIE_REG_RPSC_BEN;
+	__raw_writel(rpsc, pcie->cfg_base + XILINX_PCIE_REG_RPSC);
+
+	return 0;
+}
+
+static const struct dm_pci_ops pcie_xilinx_ops = {
+	.read_config = pcie_xilinx_read_config,
+	.write_config = pcie_xilinx_write_config,
+};
+
+static const struct udevice_id pcie_xilinx_ids[] = {
+	{ .compatible = "xlnx,axi-pcie-host-1.00.a" },
+	{ }
+};
+
+U_BOOT_DRIVER(pcie_xilinx) = {
+	.name = "pcie_xilinx",
+	.id = UCLASS_PCI,
+	.of_match = pcie_xilinx_ids,
+	.ops = &pcie_xilinx_ops,
+	.of_to_plat = pcie_xilinx_of_to_plat,
+	.priv_auto = sizeof(struct xilinx_pcie),
+};
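Note on the config-access path used by pcie_xilinx_config_address() above: the bus/device/function numbers plus the register offset are mapped onto the memory-mapped configuration window following the standard PCIe ECAM layout (4 KiB per function, 8 functions per device, 32 devices per bus, so 1 MiB per bus). The short standalone sketch below illustrates that offset packing; the ECAM_OFFSET macro here is a local illustration written for this note and is not U-Boot's PCIE_ECAM_OFFSET definition, though it follows the same conventional layout.

/* Minimal sketch of the ECAM offset packing assumed above:
 * bus << 20 | dev << 15 | func << 12 | (offset & 0xfff)
 */
#include <stdint.h>
#include <stdio.h>

#define ECAM_OFFSET(bus, dev, func, off) \
	(((uint32_t)(bus) << 20) | ((uint32_t)(dev) << 15) | \
	 ((uint32_t)(func) << 12) | ((uint32_t)(off) & 0xfff))

int main(void)
{
	/* Example: bus 1, device 0, function 0, register 0x10 (BAR0) */
	printf("config access at ECAM base + 0x%08x\n",
	       ECAM_OFFSET(1, 0, 0, 0x10));
	return 0;
}

With this packing, the driver only needs a single contiguous mapping of the bridge's ECAM window (cfg_base) and can derive every function's 4 KiB configuration page by arithmetic, which is exactly what pci_generic_mmap_read_config()/pci_generic_mmap_write_config() rely on.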