author    | Bjorn Helgaas | 2020-08-05 18:24:18 -0500
committer | Bjorn Helgaas | 2020-08-05 18:24:18 -0500
commit    | 13a77336f4f5681068bbaa4eac3ba8adff684a54 (patch)
tree      | 22ed45537c1789610c8acf77dfd9ccb4f3d4c070 /drivers
parent    | 5b17dbab263035aa1dc8e61637bc719f47c60daa (diff)
parent    | 0dbe77c9d2f8e6ef1792db99e150dd0025a977eb (diff)
Merge branch 'remotes/lorenzo/pci/cadence'
- Convert cadence to use the standard "dma-ranges" DT property instead of
  its own "cdns,no-bar-match-nbits" (Kishon Vijay Abraham I); a toy model
  of the inbound BAR fitting this enables follows the commit list below
- Fix pm_runtime_put_sync() issues in cadence error paths (Kishon Vijay
Abraham I)
- Add PTR_ALIGN_DOWN macro (Kishon Vijay Abraham I)
- Convert cadence r/w accessors to perform only 32-bit accesses, as shown
  in the first sketch after this list (Kishon Vijay Abraham I)
- Add cadence support to start Link and check Link status (Kishon Vijay
Abraham I)
- Allow custom PCI ops for cadence-based drivers (Kishon Vijay Abraham I)
- Remove "mem" from cadence reg binding since it's not memory and it
overlaps the PCIe config and memory region (Kishon Vijay Abraham I)
- Add cadence ->cpu_addr_fixup() for platforms that require absolute
  addresses in the ATU, not just offsets; see the optional-ops sketch
  after this list (Kishon Vijay Abraham I)
- Update cadence Vendor IDs using local management registers, not
architected config space (Kishon Vijay Abraham I)
- Add cadence endpoint driver MSI-X support; the PBA placement arithmetic
  is illustrated after the commit list below (Kishon Vijay Abraham I)
- Add bindings and driver for TI J721E SoC, supporting both host and
endpoint mode (Kishon Vijay Abraham I)
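The 32-bit accessor conversion above replaces byte/word MMIO with an aligned
32-bit load plus shift/mask on the read side, and a 32-bit read-modify-write
on the write side, using the new PTR_ALIGN_DOWN helper to locate the
enclosing word. Below is a minimal stand-alone sketch of that idea for a
little-endian host; read_sz/write_sz and the sample register are invented
for illustration (the real helpers are cdns_pcie_read_sz() and
cdns_pcie_write_sz() in the diff, and the kernel's PTR_ALIGN_DOWN is built
on typeof() rather than void *):

  #include <stdint.h>
  #include <stdio.h>

  /* Round a pointer down to the previous multiple of 'a' (a power of two). */
  #define ALIGN_DOWN(x, a)     ((x) & ~((uintptr_t)(a) - 1))
  #define PTR_ALIGN_DOWN(p, a) ((void *)ALIGN_DOWN((uintptr_t)(p), (a)))

  /* Read 1, 2 or 4 bytes at 'addr' using only an aligned 32-bit load. */
  static uint32_t read_sz(void *addr, int size)
  {
          uint32_t *aligned = PTR_ALIGN_DOWN(addr, 0x4);
          unsigned int offset = (uintptr_t)addr & 0x3;
          uint32_t val = *aligned;

          if (size > 2)
                  return val;
          return (val >> (8 * offset)) & ((1u << (size * 8)) - 1);
  }

  /* Write a 1- or 2-byte value with a 32-bit read-modify-write, so the
   * bus only ever sees aligned 32-bit accesses. */
  static void write_sz(void *addr, int size, uint32_t value)
  {
          uint32_t *aligned = PTR_ALIGN_DOWN(addr, 0x4);
          unsigned int offset = (uintptr_t)addr & 0x3;
          uint32_t mask = ~(((1u << (size * 8)) - 1) << (offset * 8));

          *aligned = (*aligned & mask) | (value << (offset * 8));
  }

  int main(void)
  {
          uint32_t reg = 0x11223344;

          write_sz((uint8_t *)&reg + 1, 1, 0xab);  /* byte write via RMW */
          printf("reg   = 0x%08x\n", reg);         /* 0x1122ab44 */
          printf("readw = 0x%04x\n", read_sz((uint8_t *)&reg + 2, 2)); /* 0x1122 */
          return 0;
  }

This read-modify-write is also why the endpoint driver now takes a spinlock
around the PCI_STATUS update when raising legacy interrupts: a sub-word
store can no longer be performed atomically.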
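The link-control and address-fixup bullets above all land in one place: a
cdns_pcie_ops table whose hooks are optional, with the core falling back to
a default when a platform leaves one NULL. A condensed model of that
pattern, with the struct and function names invented for the sketch (the
real table is cdns_pcie_ops in pcie-cadence.h, visible in the diff):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct pcie_ctrl;

  struct pcie_ops {                 /* every hook may be NULL */
          int      (*start_link)(struct pcie_ctrl *pcie);
          bool     (*link_up)(struct pcie_ctrl *pcie);
          uint64_t (*cpu_addr_fixup)(struct pcie_ctrl *pcie, uint64_t cpu_addr);
  };

  struct pcie_ctrl {
          const struct pcie_ops *ops;
  };

  static int pcie_start_link(struct pcie_ctrl *pcie)
  {
          if (pcie->ops->start_link)
                  return pcie->ops->start_link(pcie);
          return 0;               /* training not software-controlled */
  }

  static bool pcie_link_up(struct pcie_ctrl *pcie)
  {
          if (pcie->ops->link_up)
                  return pcie->ops->link_up(pcie);
          return true;            /* no hook: assume the link is up */
  }

  static uint64_t pcie_cpu_addr_fixup(struct pcie_ctrl *pcie, uint64_t addr)
  {
          if (pcie->ops->cpu_addr_fixup)
                  return pcie->ops->cpu_addr_fixup(pcie, addr);
          return addr;            /* ATU takes the CPU address as-is */
  }

  /* A platform whose ATU wants bus addresses, not absolute CPU addresses. */
  static uint64_t plat_fixup(struct pcie_ctrl *pcie, uint64_t addr)
  {
          (void)pcie;
          return addr & 0x0FFFFFFF;  /* mirrors CDNS_PLAT_CPU_TO_BUS_ADDR */
  }

  static const struct pcie_ops plat_ops = { .cpu_addr_fixup = plat_fixup };

  int main(void)
  {
          struct pcie_ctrl pcie = { .ops = &plat_ops };

          printf("start=%d up=%d addr=0x%llx\n",
                 pcie_start_link(&pcie), pcie_link_up(&pcie),
                 (unsigned long long)pcie_cpu_addr_fixup(&pcie, 0x4fa00000ULL));
          return 0;
  }

In the merge below, the J721E driver fills in start_link/stop_link/link_up,
while the generic platform driver only overrides cpu_addr_fixup.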
* remotes/lorenzo/pci/cadence:
MAINTAINERS: Add Kishon Vijay Abraham I for TI J721E SoC PCIe
misc: pci_endpoint_test: Add J721E in pci_device_id table
PCI: j721e: Add TI J721E PCIe driver
dt-bindings: PCI: Add EP mode dt-bindings for TI's J721E SoC
dt-bindings: PCI: Add host mode dt-bindings for TI's J721E SoC
PCI: cadence: Add MSI-X support to Endpoint driver
PCI: cadence: Fix updating Vendor ID and Subsystem Vendor ID register
PCI: cadence: Add new *ops* for CPU addr fixup
dt-bindings: PCI: cadence: Remove "mem" from reg binding
PCI: cadence: Allow pci_host_bridge to have custom pci_ops
PCI: cadence: Add support to start link and verify link status
PCI: cadence: Convert all r/w accessors to perform only 32-bit accesses
linux/kernel.h: Add PTR_ALIGN_DOWN macro
PCI: cadence: Fix cdns_pcie_{host|ep}_setup() error path
PCI: cadence: Use "dma-ranges" instead of "cdns,no-bar-match-nbits" property
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/misc/pci_endpoint_test.c                   |   9
-rw-r--r-- | drivers/pci/controller/cadence/Kconfig             |  23
-rw-r--r-- | drivers/pci/controller/cadence/Makefile            |   1
-rw-r--r-- | drivers/pci/controller/cadence/pci-j721e.c         | 493
-rw-r--r-- | drivers/pci/controller/cadence/pcie-cadence-ep.c   | 128
-rw-r--r-- | drivers/pci/controller/cadence/pcie-cadence-host.c | 311
-rw-r--r-- | drivers/pci/controller/cadence/pcie-cadence-plat.c |  13
-rw-r--r-- | drivers/pci/controller/cadence/pcie-cadence.c      |   8
-rw-r--r-- | drivers/pci/controller/cadence/pcie-cadence.h      | 161
9 files changed, 1083 insertions, 64 deletions
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 41c40971979e..e060796f9caa 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -68,6 +68,7 @@
 #define PCI_ENDPOINT_TEST_FLAGS		0x2c
 #define FLAG_USE_DMA			BIT(0)
 
+#define PCI_DEVICE_ID_TI_J721E		0xb00d
 #define PCI_DEVICE_ID_TI_AM654		0xb00c
 
 #define is_am654_pci_dev(pdev)		\
@@ -932,6 +933,11 @@ static const struct pci_endpoint_test_data am654_data = {
 	.irq_type = IRQ_TYPE_MSI,
 };
 
+static const struct pci_endpoint_test_data j721e_data = {
+	.alignment = 256,
+	.irq_type = IRQ_TYPE_MSI,
+};
+
 static const struct pci_device_id pci_endpoint_test_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
 	  .driver_data = (kernel_ulong_t)&default_data,
@@ -946,6 +952,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
 	},
 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),
 	},
+	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+	  .driver_data = (kernel_ulong_t)&j721e_data,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index b76b3cf55ce5..5d30564190e1 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -42,4 +42,27 @@ config PCIE_CADENCE_PLAT_EP
 	  endpoint mode. This PCIe controller may be embedded into many
 	  different vendors SoCs.
 
+config PCI_J721E
+	bool
+
+config PCI_J721E_HOST
+	bool "TI J721E PCIe platform host controller"
+	depends on OF
+	select PCIE_CADENCE_HOST
+	select PCI_J721E
+	help
+	  Say Y here if you want to support the TI J721E PCIe platform
+	  controller in host mode. TI J721E PCIe controller uses Cadence PCIe
+	  core.
+
+config PCI_J721E_EP
+	bool "TI J721E PCIe platform endpoint controller"
+	depends on OF
+	depends on PCI_ENDPOINT
+	select PCIE_CADENCE_EP
+	select PCI_J721E
+	help
+	  Say Y here if you want to support the TI J721E PCIe platform
+	  controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe
+	  core.
+
 endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
index 232a3f20876a..9bac5fb2f13d 100644
--- a/drivers/pci/controller/cadence/Makefile
+++ b/drivers/pci/controller/cadence/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
 obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
 obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
 obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
+obj-$(CONFIG_PCI_J721E) += pci-j721e.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
new file mode 100644
index 000000000000..23ad8fa699c4
--- /dev/null
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * pci-j721e - PCIe controller driver for TI's J721E SoCs
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "../../pci.h"
+#include "pcie-cadence.h"
+
+#define ENABLE_REG_SYS_2	0x108
+#define STATUS_REG_SYS_2	0x508
+#define STATUS_CLR_REG_SYS_2	0x708
+#define LINK_DOWN		BIT(1)
+
+#define J721E_PCIE_USER_CMD_STATUS	0x4
+#define LINK_TRAINING_ENABLE		BIT(0)
+
+#define J721E_PCIE_USER_LINKSTATUS	0x14
+#define LINK_STATUS			GENMASK(1, 0)
+
+enum link_status {
+	NO_RECEIVERS_DETECTED,
+	LINK_TRAINING_IN_PROGRESS,
+	LINK_UP_DL_IN_PROGRESS,
+	LINK_UP_DL_COMPLETED,
+};
+
+#define J721E_MODE_RC			BIT(7)
+#define LANE_COUNT_MASK			BIT(8)
+#define LANE_COUNT(n)			((n) << 8)
+
+#define GENERATION_SEL_MASK		GENMASK(1, 0)
+
+#define MAX_LANES			2
+
+struct j721e_pcie {
+	struct device		*dev;
+	u32			mode;
+	u32			num_lanes;
+	struct cdns_pcie	*cdns_pcie;
+	void __iomem		*user_cfg_base;
+	void __iomem		*intd_cfg_base;
+};
+
+enum j721e_pcie_mode {
+	PCI_MODE_RC,
+	PCI_MODE_EP,
+};
+
+struct j721e_pcie_data {
+	enum j721e_pcie_mode	mode;
+};
+
+static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+{
+	return readl(pcie->user_cfg_base + offset);
+}
+
+static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset,
+					  u32 value)
+{
+	writel(value, pcie->user_cfg_base + offset);
+}
+
+static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset)
+{
+	return readl(pcie->intd_cfg_base + offset);
+}
+
+static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset,
+					  u32 value)
+{
+	writel(value, pcie->intd_cfg_base + offset);
+}
+
+static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
+{
+	struct j721e_pcie *pcie = priv;
+	struct device *dev = pcie->dev;
+	u32 reg;
+
+	reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
+	if (!(reg & LINK_DOWN))
+		return IRQ_NONE;
+
+	dev_err(dev, "LINK DOWN!\n");
+
+	j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
+	return IRQ_HANDLED;
+}
+
+static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
+{
+	u32 reg;
+
+	reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
+	reg |= LINK_DOWN;
+	j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
+}
+
+static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie)
+{
+	struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+	u32 reg;
+
+	reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
+	reg |= LINK_TRAINING_ENABLE;
+	j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);
+
+	return 0;
+}
+
+static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie)
+{
+	struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+	u32 reg;
+
+	reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
+	reg &= ~LINK_TRAINING_ENABLE;
+	j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);
+}
+
+static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
+{
+	struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+	u32 reg;
+
+	reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
+	reg &= LINK_STATUS;
+	if (reg == LINK_UP_DL_COMPLETED)
+		return true;
+
+	return false;
+}
+
+static const struct cdns_pcie_ops j721e_pcie_ops = {
+	.start_link = j721e_pcie_start_link,
+	.stop_link = j721e_pcie_stop_link,
+	.link_up = j721e_pcie_link_up,
+};
+
+static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
+{
+	struct device *dev = pcie->dev;
+	u32 mask = J721E_MODE_RC;
+	u32 mode = pcie->mode;
+	u32 val = 0;
+	int ret = 0;
+
+	if (mode == PCI_MODE_RC)
+		val = J721E_MODE_RC;
+
+	ret = regmap_update_bits(syscon, 0, mask, val);
+	if (ret)
+		dev_err(dev, "failed to set pcie mode\n");
+
+	return ret;
+}
+
+static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
+				     struct regmap *syscon)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
+	int link_speed;
+	u32 val = 0;
+	int ret;
+
+	link_speed = of_pci_get_max_link_speed(np);
+	if (link_speed < 2)
+		link_speed = 2;
+
+	val = link_speed - 1;
+	ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val);
+	if (ret)
+		dev_err(dev, "failed to set link speed\n");
+
+	return ret;
+}
+
+static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
+				     struct regmap *syscon)
+{
+	struct device *dev = pcie->dev;
+	u32 lanes = pcie->num_lanes;
+	u32 val = 0;
+	int ret;
+
+	val = LANE_COUNT(lanes - 1);
+	ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val);
+	if (ret)
+		dev_err(dev, "failed to set link count\n");
+
+	return ret;
+}
+
+static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node;
+	struct regmap *syscon;
+	int ret;
+
+	syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl");
+	if (IS_ERR(syscon)) {
+		dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n");
+		return PTR_ERR(syscon);
+	}
+
+	ret = j721e_pcie_set_mode(pcie, syscon);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set pci mode\n");
+		return ret;
+	}
+
+	ret = j721e_pcie_set_link_speed(pcie, syscon);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set link speed\n");
+		return ret;
+	}
+
+	ret = j721e_pcie_set_lane_count(pcie, syscon);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set num-lanes\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 *value)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
+	unsigned int busn = bus->number;
+
+	if (busn == rc->bus_range->start)
+		return pci_generic_config_read32(bus, devfn, where, size,
+						 value);
+
+	return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+				     int where, int size, u32 value)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
+	unsigned int busn = bus->number;
+
+	if (busn == rc->bus_range->start)
+		return pci_generic_config_write32(bus, devfn, where, size,
+						  value);
+
+	return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static struct pci_ops cdns_ti_pcie_host_ops = {
+	.map_bus	= cdns_pci_map_bus,
+	.read		= cdns_ti_pcie_config_read,
+	.write		= cdns_ti_pcie_config_write,
+};
+
+static const struct j721e_pcie_data j721e_pcie_rc_data = {
+	.mode = PCI_MODE_RC,
+};
+
+static const struct j721e_pcie_data j721e_pcie_ep_data = {
+	.mode = PCI_MODE_EP,
+};
+
+static const struct of_device_id of_j721e_pcie_match[] = {
+	{
+		.compatible = "ti,j721e-pcie-host",
+		.data = &j721e_pcie_rc_data,
+	},
+	{
+		.compatible = "ti,j721e-pcie-ep",
+		.data = &j721e_pcie_ep_data,
+	},
+	{},
+};
+
+static int j721e_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct pci_host_bridge *bridge;
+	struct j721e_pcie_data *data;
+	struct cdns_pcie *cdns_pcie;
+	struct j721e_pcie *pcie;
+	struct cdns_pcie_rc *rc;
+	struct cdns_pcie_ep *ep;
+	struct gpio_desc *gpiod;
+	void __iomem *base;
+	u32 num_lanes;
+	u32 mode;
+	int ret;
+	int irq;
+
+	data = (struct j721e_pcie_data *)of_device_get_match_data(dev);
+	if (!data)
+		return -EINVAL;
+
+	mode = (u32)data->mode;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->dev = dev;
+	pcie->mode = mode;
+
+	base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+	pcie->intd_cfg_base = base;
+
+	base = devm_platform_ioremap_resource_byname(pdev, "user_cfg");
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+	pcie->user_cfg_base = base;
+
+	ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+	if (ret || num_lanes > MAX_LANES)
+		num_lanes = 1;
+	pcie->num_lanes = num_lanes;
+
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
+		return -EINVAL;
+
+	irq = platform_get_irq_byname(pdev, "link_state");
+	if (irq < 0)
+		return irq;
+
+	dev_set_drvdata(dev, pcie);
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync failed\n");
+		goto err_get_sync;
+	}
+
+	ret = j721e_pcie_ctrl_init(pcie);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync failed\n");
+		goto err_get_sync;
+	}
+
+	ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
+			       "j721e-pcie-link-down-irq", pcie);
+	if (ret < 0) {
+		dev_err(dev, "failed to request link state IRQ %d\n", irq);
+		goto err_get_sync;
+	}
+
+	j721e_pcie_config_link_irq(pcie);
+
+	switch (mode) {
+	case PCI_MODE_RC:
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
+			ret = -ENODEV;
+			goto err_get_sync;
+		}
+
+		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+		if (!bridge) {
+			ret = -ENOMEM;
+			goto err_get_sync;
+		}
+
+		bridge->ops = &cdns_ti_pcie_host_ops;
+		rc = pci_host_bridge_priv(bridge);
+
+		cdns_pcie = &rc->pcie;
+		cdns_pcie->dev = dev;
+		cdns_pcie->ops = &j721e_pcie_ops;
+		pcie->cdns_pcie = cdns_pcie;
+
+		gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+		if (IS_ERR(gpiod)) {
+			ret = PTR_ERR(gpiod);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get reset GPIO\n");
+			goto err_get_sync;
+		}
+
+		ret = cdns_pcie_init_phy(dev, cdns_pcie);
+		if (ret) {
+			dev_err(dev, "Failed to init phy\n");
+			goto err_get_sync;
+		}
+
+		/*
+		 * "Power Sequencing and Reset Signal Timings" table in
+		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
+		 * indicates PERST# should be deasserted after minimum of 100us
+		 * once REFCLK is stable. The REFCLK to the connector in RC
+		 * mode is selected while enabling the PHY. So deassert PERST#
+		 * after 100 us.
+		 */
+		if (gpiod) {
+			usleep_range(100, 200);
+			gpiod_set_value_cansleep(gpiod, 1);
+		}
+
+		ret = cdns_pcie_host_setup(rc);
+		if (ret < 0)
+			goto err_pcie_setup;
+
+		break;
+	case PCI_MODE_EP:
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
+			ret = -ENODEV;
+			goto err_get_sync;
+		}
+
+		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+		if (!ep) {
+			ret = -ENOMEM;
+			goto err_get_sync;
+		}
+
+		cdns_pcie = &ep->pcie;
+		cdns_pcie->dev = dev;
+		cdns_pcie->ops = &j721e_pcie_ops;
+		pcie->cdns_pcie = cdns_pcie;
+
+		ret = cdns_pcie_init_phy(dev, cdns_pcie);
+		if (ret) {
+			dev_err(dev, "Failed to init phy\n");
+			goto err_get_sync;
+		}
+
+		ret = cdns_pcie_ep_setup(ep);
+		if (ret < 0)
+			goto err_pcie_setup;
+
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+	}
+
+	return 0;
+
+err_pcie_setup:
+	cdns_pcie_disable_phy(cdns_pcie);
+
+err_get_sync:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+
+static int j721e_pcie_remove(struct platform_device *pdev)
+{
+	struct j721e_pcie *pcie = platform_get_drvdata(pdev);
+	struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+	struct device *dev = &pdev->dev;
+
+	cdns_pcie_disable_phy(cdns_pcie);
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
+static struct platform_driver j721e_pcie_driver = {
+	.probe  = j721e_pcie_probe,
+	.remove = j721e_pcie_remove,
+	.driver = {
+		.name	= "j721e-pcie",
+		.of_match_table = of_j721e_pcie_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(j721e_pcie_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 1c15c8352125..ec1306da301f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -8,7 +8,6 @@
 #include <linux/of.h>
 #include <linux/pci-epc.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 
 #include "pcie-cadence.h"
@@ -52,6 +51,7 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
 				struct pci_epf_bar *epf_bar)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie_epf *epf = &ep->epf[fn];
 	struct cdns_pcie *pcie = &ep->pcie;
 	dma_addr_t bar_phys = epf_bar->phys_addr;
 	enum pci_barno bar = epf_bar->barno;
@@ -112,6 +112,8 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
 		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
 	cdns_pcie_writel(pcie, reg, cfg);
 
+	epf->epf_bar[bar] = epf_bar;
+
 	return 0;
 }
 
@@ -119,6 +121,7 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
 				   struct pci_epf_bar *epf_bar)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie_epf *epf = &ep->epf[fn];
 	struct cdns_pcie *pcie = &ep->pcie;
 	enum pci_barno bar = epf_bar->barno;
 	u32 reg, cfg, b, ctrl;
@@ -140,6 +143,8 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
 
 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+
+	epf->epf_bar[bar] = NULL;
 }
 
 static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
@@ -225,10 +230,55 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
 	return mme;
 }
 
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+	u32 val, reg;
+
+	reg = cap + PCI_MSIX_FLAGS;
+	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
+	if (!(val & PCI_MSIX_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val &= PCI_MSIX_FLAGS_QSIZE;
+
+	return val;
+}
+
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
+				 enum pci_barno bir, u32 offset)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+	u32 val, reg;
+
+	reg = cap + PCI_MSIX_FLAGS;
+	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
+	val &= ~PCI_MSIX_FLAGS_QSIZE;
+	val |= interrupts;
+	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
+
+	/* Set MSIX BAR and offset */
+	reg = cap + PCI_MSIX_TABLE;
+	val = offset | bir;
+	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
+
+	/* Set PBA BAR and offset.  BAR must match MSIX BAR */
+	reg = cap + PCI_MSIX_PBA;
+	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
+
+	return 0;
+}
+
 static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
 				     bool is_asserted)
 {
 	struct cdns_pcie *pcie = &ep->pcie;
+	unsigned long flags;
 	u32 offset;
 	u16 status;
 	u8 msg_code;
@@ -253,11 +303,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
 		msg_code = MSG_CODE_DEASSERT_INTA + intx;
 	}
 
+	spin_lock_irqsave(&ep->lock, flags);
 	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
 	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
 		status ^= PCI_STATUS_INTERRUPT;
 		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
 	}
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
 		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
@@ -331,6 +383,51 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
 	return 0;
 }
 
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
+				      u16 interrupt_num)
+{
+	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+	u32 tbl_offset, msg_data, reg;
+	struct cdns_pcie *pcie = &ep->pcie;
+	struct pci_epf_msix_tbl *msix_tbl;
+	struct cdns_pcie_epf *epf;
+	u64 pci_addr_mask = 0xff;
+	u64 msg_addr;
+	u16 flags;
+	u8 bir;
+
+	/* Check whether the MSI-X feature has been enabled by the PCI host. */
+	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
+	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
+		return -EINVAL;
+
+	reg = cap + PCI_MSIX_TABLE;
+	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
+	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
+	tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
+	epf = &ep->epf[fn];
+	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
+	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+
+	/* Set the outbound region if needed. */
+	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
+	    ep->irq_pci_fn != fn) {
+		/* First region was reserved for IRQ writes. */
+		cdns_pcie_set_outbound_region(pcie, fn, 0,
+					      false,
+					      ep->irq_phys_addr,
+					      msg_addr & ~pci_addr_mask,
+					      pci_addr_mask + 1);
+		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
+		ep->irq_pci_fn = fn;
+	}
+	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));
+
+	return 0;
+}
+
 static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
 				  enum pci_epc_irq_type type,
 				  u16 interrupt_num)
@@ -344,6 +441,9 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
 	case PCI_EPC_IRQ_MSI:
 		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
 
+	case PCI_EPC_IRQ_MSIX:
+		return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);
+
 	default:
 		break;
 	}
@@ -355,8 +455,10 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 	struct cdns_pcie *pcie = &ep->pcie;
+	struct device *dev = pcie->dev;
 	struct pci_epf *epf;
 	u32 cfg;
+	int ret;
 
 	/*
 	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
@@ -367,13 +469,19 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
 		cfg |= BIT(epf->func_no);
 	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
 
+	ret = cdns_pcie_start_link(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to start link\n");
+		return ret;
+	}
+
 	return 0;
 }
 
 static const struct pci_epc_features cdns_pcie_epc_features = {
 	.linkup_notifier = false,
 	.msi_capable = true,
-	.msix_capable = false,
+	.msix_capable = true,
 };
 
 static const struct pci_epc_features*
@@ -390,6 +498,8 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
 	.unmap_addr	= cdns_pcie_ep_unmap_addr,
 	.set_msi	= cdns_pcie_ep_set_msi,
 	.get_msi	= cdns_pcie_ep_get_msi,
+	.set_msix	= cdns_pcie_ep_set_msix,
+	.get_msix	= cdns_pcie_ep_get_msix,
 	.raise_irq	= cdns_pcie_ep_raise_irq,
 	.start		= cdns_pcie_ep_start,
 	.get_features	= cdns_pcie_ep_get_features,
@@ -440,8 +550,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
 	if (IS_ERR(epc)) {
 		dev_err(dev, "failed to create epc device\n");
-		ret = PTR_ERR(epc);
-		goto err_init;
+		return PTR_ERR(epc);
 	}
 
 	epc_set_drvdata(epc, ep);
@@ -449,11 +558,16 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
 		epc->max_functions = 1;
 
+	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
+			       GFP_KERNEL);
+	if (!ep->epf)
+		return -ENOMEM;
+
 	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
 			       resource_size(pcie->mem_res), PAGE_SIZE);
 	if (ret < 0) {
 		dev_err(dev, "failed to initialize the memory space\n");
-		goto err_init;
+		return ret;
 	}
 
 	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
@@ -466,14 +580,12 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
 	/* Reserve region 0 for IRQs */
 	set_bit(0, &ep->ob_region_map);
+	spin_lock_init(&ep->lock);
 
 	return 0;
 
  free_epc_mem:
 	pci_epc_mem_exit(epc);
 
- err_init:
-	pm_runtime_put_sync(dev);
-
 	return ret;
 }
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 8c2543f28ba0..8d86560196aa 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -3,16 +3,28 @@
 // Cadence PCIe host controller driver.
 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
+#include <linux/list_sort.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 
 #include "pcie-cadence.h"
 
-static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
-				      int where)
+static u64 bar_max_size[] = {
+	[RP_BAR0] = _ULL(128 * SZ_2G),
+	[RP_BAR1] = SZ_2G,
+	[RP_NO_BAR] = _BITULL(63),
+};
+
+static u8 bar_aperture_mask[] = {
+	[RP_BAR0] = 0x1F,
+	[RP_BAR1] = 0xF,
+};
+
+void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+			       int where)
 {
 	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
 	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
@@ -70,6 +82,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
 {
 	struct cdns_pcie *pcie = &rc->pcie;
 	u32 value, ctrl;
+	u32 id;
 
 	/*
 	 * Set the root complex BAR configuration register:
@@ -89,8 +102,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
 	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
 
 	/* Set root port configuration space */
-	if (rc->vendor_id != 0xffff)
-		cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
+	if (rc->vendor_id != 0xffff) {
+		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
+			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
+		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+	}
+
 	if (rc->device_id != 0xffff)
 		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
 
@@ -101,18 +118,229 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
 	return 0;
 }
 
+static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
+					enum cdns_pcie_rp_bar bar,
+					u64 cpu_addr, u64 size,
+					unsigned long flags)
+{
+	struct cdns_pcie *pcie = &rc->pcie;
+	u32 addr0, addr1, aperture, value;
+
+	if (!rc->avail_ib_bar[bar])
+		return -EBUSY;
+
+	rc->avail_ib_bar[bar] = false;
+
+	aperture = ilog2(size);
+	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
+		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
+	addr1 = upper_32_bits(cpu_addr);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);
+
+	if (bar == RP_NO_BAR)
+		return 0;
+
+	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
+	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+	if (size + cpu_addr >= SZ_4G) {
+		if (!(flags & IORESOURCE_PREFETCH))
+			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
+		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
+	} else {
+		if (!(flags & IORESOURCE_PREFETCH))
+			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
+		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
+	}
+
+	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
+	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+
+	return 0;
+}
+
+static enum cdns_pcie_rp_bar
+cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+	enum cdns_pcie_rp_bar bar, sel_bar;
+
+	sel_bar = RP_BAR_UNDEFINED;
+	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+		if (!rc->avail_ib_bar[bar])
+			continue;
+
+		if (size <= bar_max_size[bar]) {
+			if (sel_bar == RP_BAR_UNDEFINED) {
+				sel_bar = bar;
+				continue;
+			}
+
+			if (bar_max_size[bar] < bar_max_size[sel_bar])
+				sel_bar = bar;
+		}
+	}
+
+	return sel_bar;
+}
+
+static enum cdns_pcie_rp_bar
+cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+	enum cdns_pcie_rp_bar bar, sel_bar;
+
+	sel_bar = RP_BAR_UNDEFINED;
+	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+		if (!rc->avail_ib_bar[bar])
+			continue;
+
+		if (size >= bar_max_size[bar]) {
+			if (sel_bar == RP_BAR_UNDEFINED) {
+				sel_bar = bar;
+				continue;
+			}
+
+			if (bar_max_size[bar] > bar_max_size[sel_bar])
+				sel_bar = bar;
+		}
+	}
+
+	return sel_bar;
+}
+
+static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
+				     struct resource_entry *entry)
+{
+	u64 cpu_addr, pci_addr, size, winsize;
+	struct cdns_pcie *pcie = &rc->pcie;
+	struct device *dev = pcie->dev;
+	enum cdns_pcie_rp_bar bar;
+	unsigned long flags;
+	int ret;
+
+	cpu_addr = entry->res->start;
+	pci_addr = entry->res->start - entry->offset;
+	flags = entry->res->flags;
+	size = resource_size(entry->res);
+
+	if (entry->offset) {
+		dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
+			pci_addr, cpu_addr);
+		return -EINVAL;
+	}
+
+	while (size > 0) {
+		/*
+		 * Try to find a minimum BAR whose size is greater than
+		 * or equal to the remaining resource_entry size. This will
+		 * fail if the size of each of the available BARs is less than
+		 * the remaining resource_entry size.
+		 * If a minimum BAR is found, IB ATU will be configured and
+		 * exited.
+		 */
+		bar = cdns_pcie_host_find_min_bar(rc, size);
+		if (bar != RP_BAR_UNDEFINED) {
+			ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
+							   size, flags);
+			if (ret)
+				dev_err(dev, "IB BAR: %d config failed\n", bar);
+			return ret;
+		}
+
+		/*
+		 * If the control reaches here, it would mean the remaining
+		 * resource_entry size cannot be fitted in a single BAR. So we
+		 * find a maximum BAR whose size is less than or equal to the
+		 * remaining resource_entry size and split the resource entry
+		 * so that part of resource entry is fitted inside the maximum
+		 * BAR. The remaining size would be fitted during the next
+		 * iteration of the loop.
+		 * If a maximum BAR is not found, there is no way we can fit
+		 * this resource_entry, so we error out.
+		 */
+		bar = cdns_pcie_host_find_max_bar(rc, size);
+		if (bar == RP_BAR_UNDEFINED) {
+			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
+				cpu_addr);
+			return -EINVAL;
+		}
+
+		winsize = bar_max_size[bar];
+		ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
+						   flags);
+		if (ret) {
+			dev_err(dev, "IB BAR: %d config failed\n", bar);
+			return ret;
+		}
+
+		size -= winsize;
+		cpu_addr += winsize;
+	}
+
+	return 0;
+}
+
+static int cdns_pcie_host_dma_ranges_cmp(void *priv, struct list_head *a,
+					 struct list_head *b)
+{
+	struct resource_entry *entry1, *entry2;
+
+	entry1 = container_of(a, struct resource_entry, node);
+	entry2 = container_of(b, struct resource_entry, node);
+
+	return resource_size(entry2->res) - resource_size(entry1->res);
+}
+
+static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
+{
+	struct cdns_pcie *pcie = &rc->pcie;
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
+	struct pci_host_bridge *bridge;
+	struct resource_entry *entry;
+	u32 no_bar_nbits = 32;
+	int err;
+
+	bridge = pci_host_bridge_from_priv(rc);
+	if (!bridge)
+		return -ENOMEM;
+
+	if (list_empty(&bridge->dma_ranges)) {
+		of_property_read_u32(np, "cdns,no-bar-match-nbits",
+				     &no_bar_nbits);
+		err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
+						   (u64)1 << no_bar_nbits, 0);
+		if (err)
+			dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
+		return err;
+	}
+
+	list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
+
+	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+		err = cdns_pcie_host_bar_config(rc, entry);
+		if (err) {
+			dev_err(dev, "Fail to configure IB using dma-ranges\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
 {
 	struct cdns_pcie *pcie = &rc->pcie;
-	struct resource *mem_res = pcie->mem_res;
 	struct resource *bus_range = rc->bus_range;
 	struct resource *cfg_res = rc->cfg_res;
 	struct device *dev = pcie->dev;
 	struct device_node *np = dev->of_node;
 	struct of_pci_range_parser parser;
+	u64 cpu_addr = cfg_res->start;
 	struct of_pci_range range;
 	u32 addr0, addr1, desc1;
-	u64 cpu_addr;
 	int r, err;
 
 	/*
@@ -125,7 +353,9 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
 
-	cpu_addr = cfg_res->start - mem_res->start;
+	if (pcie->ops->cpu_addr_fixup)
+		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
+
 	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
 		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
 	addr1 = upper_32_bits(cpu_addr);
@@ -154,16 +384,9 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
 		r++;
 	}
 
-	/*
-	 * Set Root Port no BAR match Inbound Translation registers:
-	 * needed for MSI and DMA.
-	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their
-	 * inbound translation registers.
-	 */
-	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
-	addr1 = 0;
-	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
-	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);
+	err = cdns_pcie_host_map_dma_ranges(rc);
+	if (err)
+		return err;
 
 	return 0;
 }
@@ -173,10 +396,16 @@ static int cdns_pcie_host_init(struct device *dev,
 			       struct cdns_pcie_rc *rc)
 {
 	struct resource *bus_range = NULL;
+	struct pci_host_bridge *bridge;
 	int err;
 
+	bridge = pci_host_bridge_from_priv(rc);
+	if (!bridge)
+		return -ENOMEM;
+
 	/* Parse our PCI ranges and request their resources */
-	err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
+	err = pci_parse_request_of_pci_ranges(dev, resources,
+					      &bridge->dma_ranges, &bus_range);
 	if (err)
 		return err;
 
@@ -198,6 +427,23 @@ static int cdns_pcie_host_init(struct device *dev,
 	return err;
 }
 
+static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	int retries;
+
+	/* Check if the link is up or not */
+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+		if (cdns_pcie_link_up(pcie)) {
+			dev_info(dev, "Link up\n");
+			return 0;
+		}
+		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+	}
+
+	return -ETIMEDOUT;
+}
+
 int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 {
 	struct device *dev = rc->pcie.dev;
@@ -205,6 +451,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 	struct device_node *np = dev->of_node;
 	struct pci_host_bridge *bridge;
 	struct list_head resources;
+	enum cdns_pcie_rp_bar bar;
 	struct cdns_pcie *pcie;
 	struct resource *res;
 	int ret;
@@ -216,9 +463,6 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 	pcie = &rc->pcie;
 	pcie->is_rc = true;
 
-	rc->no_bar_nbits = 32;
-	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);
-
 	rc->vendor_id = 0xffff;
 	of_property_read_u32(np, "vendor-id", &rc->vendor_id);
 
@@ -240,22 +484,28 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 	}
 	rc->cfg_res = res;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
-	if (!res) {
-		dev_err(dev, "missing \"mem\"\n");
-		return -EINVAL;
+	ret = cdns_pcie_start_link(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to start link\n");
+		return ret;
 	}
 
-	pcie->mem_res = res;
+	ret = cdns_pcie_host_wait_for_link(pcie);
+	if (ret)
+		dev_dbg(dev, "PCIe link never came up\n");
+
+	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+		rc->avail_ib_bar[bar] = true;
 
 	ret = cdns_pcie_host_init(dev, &resources, rc);
 	if (ret)
-		goto err_init;
+		return ret;
 
 	list_splice_init(&resources, &bridge->windows);
 	bridge->dev.parent = dev;
 	bridge->busnr = pcie->bus;
-	bridge->ops = &cdns_pcie_host_ops;
+	if (!bridge->ops)
+		bridge->ops = &cdns_pcie_host_ops;
 	bridge->map_irq = of_irq_parse_and_map_pci;
 	bridge->swizzle_irq = pci_common_swizzle;
 
@@ -268,8 +518,5 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
  err_host_probe:
 	pci_free_resource_list(&resources);
 
- err_init:
-	pm_runtime_put_sync(dev);
-
 	return ret;
 }
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
index f5c6bf6dfcb8..6f5f07b3eed1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -13,6 +13,8 @@
 #include <linux/of_device.h>
 #include "pcie-cadence.h"
 
+#define CDNS_PLAT_CPU_TO_BUS_ADDR	0x0FFFFFFF
+
 /**
  * struct cdns_plat_pcie - private data for this PCIe platform driver
  * @pcie: Cadence PCIe controller
@@ -30,6 +32,15 @@ struct cdns_plat_pcie_of_data {
 
 static const struct of_device_id cdns_plat_pcie_of_match[];
 
+static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr)
+{
+	return cpu_addr & CDNS_PLAT_CPU_TO_BUS_ADDR;
+}
+
+static const struct cdns_pcie_ops cdns_plat_ops = {
+	.cpu_addr_fixup = cdns_plat_cpu_addr_fixup,
+};
+
 static int cdns_plat_pcie_probe(struct platform_device *pdev)
 {
 	const struct cdns_plat_pcie_of_data *data;
@@ -66,6 +77,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
 		rc = pci_host_bridge_priv(bridge);
 
 		rc->pcie.dev = dev;
+		rc->pcie.ops = &cdns_plat_ops;
 		cdns_plat_pcie->pcie = &rc->pcie;
 		cdns_plat_pcie->is_rc = is_rc;
 
@@ -93,6 +105,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
 			return -ENOMEM;
 
 		ep->pcie.dev = dev;
+		ep->pcie.ops = &cdns_plat_ops;
 		cdns_plat_pcie->pcie = &ep->pcie;
 		cdns_plat_pcie->is_rc = is_rc;
 
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index cd795f6fc1e2..8a02981fd456 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -73,7 +73,9 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
 
 	/* Set the CPU address */
-	cpu_addr -= pcie->mem_res->start;
+	if (pcie->ops->cpu_addr_fixup)
+		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
+
 	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
 		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
 	addr1 = upper_32_bits(cpu_addr);
@@ -100,7 +102,9 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
 	}
 
 	/* Set the CPU address */
-	cpu_addr -= pcie->mem_res->start;
+	if (pcie->ops->cpu_addr_fixup)
+		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
+
 	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
 		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
 	addr1 = upper_32_bits(cpu_addr);
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index df14ad002fe9..00e44256c3e8 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -10,6 +10,11 @@
 #include <linux/pci.h>
 #include <linux/phy/phy.h>
 
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES	10
+#define LINK_WAIT_USLEEP_MIN	90000
+#define LINK_WAIT_USLEEP_MAX	100000
+
 /*
  * Local Management Registers
  */
@@ -87,6 +92,20 @@
 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7
 
+#define LM_RC_BAR_CFG_CTRL_DISABLED(bar)		\
+		(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar)		\
+		(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar)		\
+		(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar)		\
+		(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar)		\
+		(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar)		\
+		(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_APERTURE(bar, aperture)		\
+		(((aperture) - 2) << ((bar) * 8))
 
 /*
  * Endpoint Function Registers (PCI configuration space for endpoint functions)
@@ -94,6 +113,7 @@
 #define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))
 
 #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
+#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET	0xb0
 
 /*
  * Root Port Registers (PCI configuration space for the root port function)
@@ -170,11 +190,19 @@
 #define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
 
 enum cdns_pcie_rp_bar {
+	RP_BAR_UNDEFINED = -1,
 	RP_BAR0,
 	RP_BAR1,
 	RP_NO_BAR
 };
 
+#define CDNS_PCIE_RP_MAX_IB			0x3
+
+struct cdns_pcie_rp_ib_bar {
+	u64 size;
+	bool free;
+};
+
 /* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
 #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
 	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
@@ -223,12 +251,21 @@ enum cdns_pcie_msg_routing {
 	MSG_ROUTING_GATHER,
 };
 
+struct cdns_pcie_ops {
+	int	(*start_link)(struct cdns_pcie *pcie);
+	void	(*stop_link)(struct cdns_pcie *pcie);
+	bool	(*link_up)(struct cdns_pcie *pcie);
+	u64     (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
+};
+
 /**
  * struct cdns_pcie - private data for Cadence PCIe controller drivers
  * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
 * @bus: In Root Complex mode, the bus number
+ * @ops: Platform specific ops to control various inputs from Cadence PCIe
+ *       wrapper
  */
 struct cdns_pcie {
 	void __iomem		*reg_base;
@@ -239,7 +276,7 @@ struct cdns_pcie {
 	int			phy_count;
 	struct phy		**phy;
 	struct device_link	**link;
-	const struct cdns_pcie_common_ops *ops;
+	const struct cdns_pcie_ops *ops;
 };
 
 /**
@@ -251,19 +288,27 @@ struct cdns_pcie {
 * @bus_range: first/last buses behind the PCIe host controller
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *	single function at a time
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- *		  translation (nbits sets into the "no BAR match" register)
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
+ * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
+ *		  available
 */
 struct cdns_pcie_rc {
 	struct cdns_pcie	pcie;
 	struct resource		*cfg_res;
 	struct resource		*bus_range;
 	void __iomem		*cfg_base;
-	u32			no_bar_nbits;
 	u32			vendor_id;
 	u32			device_id;
+	bool			avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+};
+
+/**
+ * struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
+ */
+struct cdns_pcie_epf {
+	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
 };
 
 /**
@@ -282,6 +327,10 @@ struct cdns_pcie_rc {
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
+ * @lock: spin lock to disable interrupts while modifying PCIe controller
+ *	  registers fields (RMW) accessible by both remote RC and EP to
+ *	  minimize time between read and write
+ * @epf: Structure to hold info about endpoint function
 */
 struct cdns_pcie_ep {
 	struct cdns_pcie	pcie;
@@ -293,54 +342,95 @@ struct cdns_pcie_ep {
 	u64			irq_pci_addr;
 	u8			irq_pci_fn;
 	u8			irq_pending;
+	/* protect writing to PCI_STATUS while raising legacy interrupts */
+	spinlock_t		lock;
+	struct cdns_pcie_epf	*epf;
 };
 
 /* Register access */
-static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
+static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
 {
-	writeb(value, pcie->reg_base + reg);
+	writel(value, pcie->reg_base + reg);
 }
 
-static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value)
+static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
 {
-	writew(value, pcie->reg_base + reg);
+	return readl(pcie->reg_base + reg);
 }
 
-static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
+static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
 {
-	writel(value, pcie->reg_base + reg);
+	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
+	unsigned int offset = (unsigned long)addr & 0x3;
+	u32 val = readl(aligned_addr);
+
+	if (!IS_ALIGNED((uintptr_t)addr, size)) {
+		pr_warn("Address %p and size %d are not aligned\n", addr, size);
+		return 0;
+	}
+
+	if (size > 2)
+		return val;
+
+	return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
 }
 
-static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
+static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
 {
-	return readl(pcie->reg_base + reg);
+	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
+	unsigned int offset = (unsigned long)addr & 0x3;
+	u32 mask;
+	u32 val;
+
+	if (!IS_ALIGNED((uintptr_t)addr, size)) {
+		pr_warn("Address %p and size %d are not aligned\n", addr, size);
+		return;
+	}
+
+	if (size > 2) {
+		writel(value, addr);
+		return;
+	}
+
+	mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
+	val = readl(aligned_addr) & mask;
+	val |= value << (offset * 8);
+	writel(val, aligned_addr);
 }
 
 /* Root Port register access */
 static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
 				       u32 reg, u8 value)
 {
-	writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+	cdns_pcie_write_sz(addr, 0x1, value);
 }
 
 static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
 				       u32 reg, u16 value)
 {
-	writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+	cdns_pcie_write_sz(addr, 0x2, value);
 }
 
 /* Endpoint Function register access */
 static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
 					  u32 reg, u8 value)
 {
-	writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+	cdns_pcie_write_sz(addr, 0x1, value);
 }
 
 static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
 					  u32 reg, u16 value)
 {
-	writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+	cdns_pcie_write_sz(addr, 0x2, value);
 }
 
 static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
@@ -349,14 +439,11 @@ static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
 	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
 }
 
-static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg)
-{
-	return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
-}
-
 static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
 {
-	return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+	return cdns_pcie_read_sz(addr, 0x2);
 }
 
 static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
@@ -364,13 +451,43 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
 	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
 }
 
+static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
+{
+	if (pcie->ops->start_link)
+		return pcie->ops->start_link(pcie);
+
+	return 0;
+}
+
+static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
+{
+	if (pcie->ops->stop_link)
+		pcie->ops->stop_link(pcie);
+}
+
+static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
+{
+	if (pcie->ops->link_up)
+		return pcie->ops->link_up(pcie);
+
+	return true;
+}
+
 #ifdef CONFIG_PCIE_CADENCE_HOST
 int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+			       int where);
 #else
 static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 {
 	return 0;
 }
+
+static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+					     int where)
+{
+	return NULL;
+}
 #endif
 
 #ifdef CONFIG_PCIE_CADENCE_EP