author		Daniel Vetter	2015-11-23 09:04:05 +0100
committer	Daniel Vetter	2015-11-23 09:04:05 +0100
commit		92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch)
tree		15626ff9287e37c3cb81c7286d6db5a7fd77c854 /drivers/pci
parent		15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff)
parent		1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff)
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2

Backmerge to get at

commit 1b0e3a049efe471c399674fd954500ce97438d30
Author: Imre Deak <imre.deak@intel.com>
Date:   Thu Nov 5 23:04:11 2015 +0200

    drm/i915/skl: disable display side power well support for now

so that we can properly re-enable skl power wells in -next.

Conflicts are just adjacent lines changed, except for intel_fbdev.c where
we need to interleave the changes. Nothing nefarious.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/host/Kconfig | 33
-rw-r--r--  drivers/pci/host/Makefile | 3
-rw-r--r--  drivers/pci/host/pci-dra7xx.c | 7
-rw-r--r--  drivers/pci/host/pci-exynos.c | 5
-rw-r--r--  drivers/pci/host/pci-host-generic.c | 41
-rw-r--r--  drivers/pci/host/pci-imx6.c | 5
-rw-r--r--  drivers/pci/host/pci-keystone-dw.c | 8
-rw-r--r--  drivers/pci/host/pci-keystone.h | 2
-rw-r--r--  drivers/pci/host/pci-layerscape.c | 199
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 473
-rw-r--r--  drivers/pci/host/pci-tegra.c | 4
-rw-r--r--  drivers/pci/host/pci-xgene-msi.c | 2
-rw-r--r--  drivers/pci/host/pci-xgene.c | 22
-rw-r--r--  drivers/pci/host/pcie-altera-msi.c | 314
-rw-r--r--  drivers/pci/host/pcie-altera.c | 579
-rw-r--r--  drivers/pci/host/pcie-designware.c | 375
-rw-r--r--  drivers/pci/host/pcie-designware.h | 20
-rw-r--r--  drivers/pci/host/pcie-hisi.c | 198
-rw-r--r--  drivers/pci/host/pcie-iproc-platform.c | 27
-rw-r--r--  drivers/pci/host/pcie-iproc.c | 163
-rw-r--r--  drivers/pci/host/pcie-iproc.h | 20
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 86
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c | 24
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 75
-rw-r--r--  drivers/pci/iov.c | 101
-rw-r--r--  drivers/pci/msi.c | 101
-rw-r--r--  drivers/pci/of.c | 13
-rw-r--r--  drivers/pci/pci-driver.c | 22
-rw-r--r--  drivers/pci/pci-sysfs.c | 2
-rw-r--r--  drivers/pci/pci.c | 295
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 28
-rw-r--r--  drivers/pci/probe.c | 81
-rw-r--r--  drivers/pci/quirks.c | 58
-rw-r--r--  drivers/pci/setup-bus.c | 50
-rw-r--r--  drivers/pci/setup-res.c | 7
36 files changed, 2769 insertions, 676 deletions
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d5e58bae95cf..f131ba947dc6 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -39,7 +39,8 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ depends on ARM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
@@ -47,7 +48,8 @@ config PCI_RCAR_GEN2
config PCI_RCAR_GEN2_PCIE
bool "Renesas R-Car PCIe controller"
- depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ depends on ARM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
@@ -105,7 +107,7 @@ config PCI_XGENE_MSI
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
- depends on OF && ARM
+ depends on OF && (ARM || ARCH_LAYERSCAPE)
select PCIE_DW
select MFD_SYSCON
help
@@ -145,4 +147,29 @@ config PCIE_IPROC_BCMA
Say Y here if you want to use the Broadcom iProc PCIe controller
through the BCMA bus interface
+config PCIE_ALTERA
+ bool "Altera PCIe controller"
+ depends on ARM || NIOS2
+ depends on OF_PCI
+ select PCI_DOMAINS
+ help
+ Say Y here if you want to enable PCIe controller support on Altera
+ FPGA.
+
+config PCIE_ALTERA_MSI
+ bool "Altera PCIe MSI feature"
+ depends on PCIE_ALTERA && PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want PCIe MSI support for the Altera FPGA.
+ This MSI driver supports Altera MSI to GIC controller IP.
+
+config PCI_HISI
+ depends on OF && ARM64
+ bool "HiSilicon SoC HIP05 PCIe controller"
+ select PCIEPORTBUS
+ select PCIE_DW
+ help
+ Say Y here if you want PCIe controller support on HiSilicon HIP05 SoC
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 140d66f796e4..9d4d3c6924a1 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -17,3 +17,6 @@ obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
+obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 199e29a044cd..8c3688046c02 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -62,6 +62,7 @@
#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
#define LINK_UP BIT(16)
+#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
struct dra7xx_pcie {
void __iomem *base;
@@ -151,6 +152,12 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
dw_pcie_setup_rc(pp);
+
+ pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
+
dra7xx_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
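
The dra7xx hunk above masks the CPU-visible io/mem/cfg base addresses with DRA7XX_CPU_TO_BUS_ADDR before the DesignWare core programs its outbound translation; going by the macro name, this drops the high nibble so the values match the addresses seen on the PCIe side. A minimal sketch of the effect (the example address is hypothetical; only the 0x0FFFFFFF mask comes from the patch):

	#define DRA7XX_CPU_TO_BUS_ADDR	0x0FFFFFFF

	u64 cpu_addr = 0x20013000;				/* hypothetical CPU-side window base */
	u64 bus_addr = cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;	/* 0x00013000 as seen on the bus     */
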
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index f9f468d9a819..01095e1160a4 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -454,7 +454,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
int ret;
exynos_pcie_sideband_dbi_r_mode(pp, true);
- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_r_mode(pp, false);
return ret;
}
@@ -465,8 +465,7 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
int ret;
exynos_pcie_sideband_dbi_w_mode(pp, true);
- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3),
- where, size, val);
+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_w_mode(pp, false);
return ret;
}
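
The exynos accessors above (and the keystone ones further down) now pass the exact register address to dw_pcie_cfg_read()/dw_pcie_cfg_write() and let the DesignWare helpers handle sub-word widths, instead of pre-aligning the pointer and passing `where` separately. A rough sketch of a size-dispatching accessor of that shape; this is an illustration with a hypothetical name, not the pcie-designware.c implementation (whose diff is not shown in this section):

	static int example_cfg_read(void __iomem *addr, int size, u32 *val)
	{
		/* dispatch on access size; addr already points at the register */
		switch (size) {
		case 1:
			*val = readb(addr);
			break;
		case 2:
			*val = readw(addr);
			break;
		case 4:
			*val = readl(addr);
			break;
		default:
			*val = 0;
			return PCIBIOS_BAD_REGISTER_NUMBER;
		}

		return PCIBIOS_SUCCESSFUL;
	}
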
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 265dd25169bf..5434c90db243 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -27,7 +27,7 @@
struct gen_pci_cfg_bus_ops {
u32 bus_shift;
- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
+ struct pci_ops ops;
};
struct gen_pci_cfg_windows {
@@ -35,7 +35,7 @@ struct gen_pci_cfg_windows {
struct resource *bus_range;
void __iomem **win;
- const struct gen_pci_cfg_bus_ops *ops;
+ struct gen_pci_cfg_bus_ops *ops;
};
/*
@@ -65,7 +65,11 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
- .map_bus = gen_pci_map_cfg_bus_cam,
+ .ops = {
+ .map_bus = gen_pci_map_cfg_bus_cam,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
};
static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
@@ -80,12 +84,11 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
.bus_shift = 20,
- .map_bus = gen_pci_map_cfg_bus_ecam,
-};
-
-static struct pci_ops gen_pci_ops = {
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
+ .ops = {
+ .map_bus = gen_pci_map_cfg_bus_ecam,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
};
static const struct of_device_id gen_pci_of_match[] = {
@@ -166,6 +169,7 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
struct resource *bus_range;
struct device *dev = pci->host.dev.parent;
struct device_node *np = dev->of_node;
+ u32 sz = 1 << pci->cfg.ops->bus_shift;
err = of_address_to_resource(np, 0, &pci->cfg.res);
if (err) {
@@ -193,10 +197,9 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
bus_range = pci->cfg.bus_range;
for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
u32 idx = busn - bus_range->start;
- u32 sz = 1 << pci->cfg.ops->bus_shift;
pci->cfg.win[idx] = devm_ioremap(dev,
- pci->cfg.res.start + busn * sz,
+ pci->cfg.res.start + idx * sz,
sz);
if (!pci->cfg.win[idx])
return -ENOMEM;
@@ -210,7 +213,6 @@ static int gen_pci_probe(struct platform_device *pdev)
int err;
const char *type;
const struct of_device_id *of_id;
- const int *prop;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -225,17 +227,10 @@ static int gen_pci_probe(struct platform_device *pdev)
return -EINVAL;
}
- prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL);
- if (prop) {
- if (*prop)
- pci_add_flags(PCI_PROBE_ONLY);
- else
- pci_clear_flags(PCI_PROBE_ONLY);
- }
+ of_pci_check_probe_only();
of_id = of_match_node(gen_pci_of_match, np);
- pci->cfg.ops = of_id->data;
- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
+ pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
pci->host.dev.parent = dev;
INIT_LIST_HEAD(&pci->host.windows);
INIT_LIST_HEAD(&pci->resources);
@@ -256,7 +251,9 @@ static int gen_pci_probe(struct platform_device *pdev)
if (!pci_has_flag(PCI_PROBE_ONLY))
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
- bus = pci_scan_root_bus(dev, 0, &gen_pci_ops, pci, &pci->resources);
+
+ bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start,
+ &pci->cfg.ops->ops, pci, &pci->resources);
if (!bus) {
dev_err(dev, "Scanning rootbus failed");
return -ENODEV;
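
Two things change in pci-host-generic above: each config-access variant (CAM vs ECAM) now carries a complete struct pci_ops in its match data instead of patching a shared global, and the per-bus config windows are mapped by window index rather than by absolute bus number. The second part matters whenever bus-range does not start at 0. A small worked sketch with a hypothetical bus range:

	u32 sz    = 1 << 20;		/* ECAM: bus_shift = 20, 1 MiB per bus   */
	u32 start = 0x20, busn = 0x21;	/* hypothetical bus-range = <0x20 0x3f>  */
	u32 idx   = busn - start;	/* window index 1                        */
	/*
	 * The window is ioremapped at cfg.res.start + idx * sz; the old
	 * busn * sz offset would land 0x20 buses beyond the reserved
	 * config region whenever the range starts above bus 0.
	 */
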
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 8f3a9813c4e5..22e8224126fd 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -74,6 +74,7 @@ struct imx6_pcie {
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
+#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
@@ -503,7 +504,7 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
- if (rx_valid & 0x01)
+ if (rx_valid & PCIE_PHY_RX_ASIC_OUT_VALID)
return 0;
if ((debug_r0 & 0x3f) != 0x0d)
@@ -539,7 +540,7 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
IRQF_SHARED, "mx6-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
- return -ENODEV;
+ return ret;
}
}
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index e71da991949b..ed34c9520a02 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -70,7 +70,7 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
*bit_pos = offset >> 3;
}
-u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
@@ -322,7 +322,7 @@ static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
- u32 start = pp->mem.start, end = pp->mem.end;
+ u32 start = pp->mem->start, end = pp->mem->end;
int i, tr_size;
/* Disable BARs for inbound access */
@@ -398,7 +398,7 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
- return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
+ return dw_pcie_cfg_read(addr + where, size, val);
}
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
@@ -410,7 +410,7 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
- return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
+ return dw_pcie_cfg_write(addr + where, size, val);
}
/**
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index 478d932b602d..f0944e8c4b02 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -37,7 +37,7 @@ struct keystone_pcie {
/* Keystone DW specific MSI controller APIs/definitions */
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
-u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index b2328ea13dcf..3923bed93c7e 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014 Freescale Semiconductor.
*
- * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
+ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_pci.h>
@@ -32,27 +31,60 @@
#define LTSSM_STATE_MASK 0x3f
#define LTSSM_PCIE_L0 0x11 /* L0 state */
-/* Symbol Timer Register and Filter Mask Register 1 */
-#define PCIE_STRFMR1 0x71c
+/* PEX Internal Configuration Registers */
+#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
+
+/* PEX LUT registers */
+#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */
+
+struct ls_pcie_drvdata {
+ u32 lut_offset;
+ u32 ltssm_shift;
+ struct pcie_host_ops *ops;
+};
struct ls_pcie {
- struct list_head node;
- struct device *dev;
- struct pci_bus *bus;
void __iomem *dbi;
+ void __iomem *lut;
struct regmap *scfg;
struct pcie_port pp;
+ const struct ls_pcie_drvdata *drvdata;
int index;
- int msi_irq;
};
#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp)
-static int ls_pcie_link_up(struct pcie_port *pp)
+static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
+{
+ u32 header_type;
+
+ header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE);
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+/* Clear multi-function bit */
+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
+{
+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
+}
+
+/* Fix class value */
+static void ls_pcie_fix_class(struct ls_pcie *pcie)
+{
+ iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
+}
+
+static int ls1021_pcie_link_up(struct pcie_port *pp)
{
u32 state;
struct ls_pcie *pcie = to_ls_pcie(pp);
+ if (!pcie->scfg)
+ return 0;
+
regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
@@ -62,27 +94,27 @@ static int ls_pcie_link_up(struct pcie_port *pp)
return 1;
}
-static int ls_pcie_establish_link(struct pcie_port *pp)
+static void ls1021_pcie_host_init(struct pcie_port *pp)
{
- unsigned int retries;
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+ u32 val, index[2];
- for (retries = 0; retries < 200; retries++) {
- if (dw_pcie_link_up(pp))
- return 0;
- usleep_range(100, 1000);
+ pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
+ "fsl,pcie-scfg");
+ if (IS_ERR(pcie->scfg)) {
+ dev_err(pp->dev, "No syscfg phandle specified\n");
+ pcie->scfg = NULL;
+ return;
}
- dev_err(pp->dev, "phy link never came up\n");
- return -EINVAL;
-}
-
-static void ls_pcie_host_init(struct pcie_port *pp)
-{
- struct ls_pcie *pcie = to_ls_pcie(pp);
- u32 val;
+ if (of_property_read_u32_array(pp->dev->of_node,
+ "fsl,pcie-scfg", index, 2)) {
+ pcie->scfg = NULL;
+ return;
+ }
+ pcie->index = index[1];
dw_pcie_setup_rc(pp);
- ls_pcie_establish_link(pp);
/*
* LS1021A Workaround for internal TKT228622
@@ -93,21 +125,97 @@ static void ls_pcie_host_init(struct pcie_port *pp)
iowrite32(val, pcie->dbi + PCIE_STRFMR1);
}
+static int ls_pcie_link_up(struct pcie_port *pp)
+{
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+ u32 state;
+
+ state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
+ pcie->drvdata->ltssm_shift) &
+ LTSSM_STATE_MASK;
+
+ if (state < LTSSM_PCIE_L0)
+ return 0;
+
+ return 1;
+}
+
+static void ls_pcie_host_init(struct pcie_port *pp)
+{
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+
+ iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ ls_pcie_fix_class(pcie);
+ ls_pcie_clear_multifunction(pcie);
+ iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
+}
+
+static int ls_pcie_msi_host_init(struct pcie_port *pp,
+ struct msi_controller *chip)
+{
+ struct device_node *msi_node;
+ struct device_node *np = pp->dev->of_node;
+
+ /*
+ * The MSI domain is set by the generic of_msi_configure(). This
+ * .msi_host_init() function keeps us from doing the default MSI
+ * domain setup in dw_pcie_host_init() and also enforces the
+ * requirement that "msi-parent" exists.
+ */
+ msi_node = of_parse_phandle(np, "msi-parent", 0);
+ if (!msi_node) {
+ dev_err(pp->dev, "failed to find msi-parent\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct pcie_host_ops ls1021_pcie_host_ops = {
+ .link_up = ls1021_pcie_link_up,
+ .host_init = ls1021_pcie_host_init,
+ .msi_host_init = ls_pcie_msi_host_init,
+};
+
static struct pcie_host_ops ls_pcie_host_ops = {
.link_up = ls_pcie_link_up,
.host_init = ls_pcie_host_init,
+ .msi_host_init = ls_pcie_msi_host_init,
+};
+
+static struct ls_pcie_drvdata ls1021_drvdata = {
+ .ops = &ls1021_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls1043_drvdata = {
+ .lut_offset = 0x10000,
+ .ltssm_shift = 24,
+ .ops = &ls_pcie_host_ops,
};
-static int ls_add_pcie_port(struct ls_pcie *pcie)
+static struct ls_pcie_drvdata ls2080_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 0,
+ .ops = &ls_pcie_host_ops,
+};
+
+static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
+
+static int __init ls_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
{
- struct pcie_port *pp;
int ret;
+ struct ls_pcie *pcie = to_ls_pcie(pp);
- pp = &pcie->pp;
- pp->dev = pcie->dev;
+ pp->dev = &pdev->dev;
pp->dbi_base = pcie->dbi;
- pp->root_bus_nr = -1;
- pp->ops = &ls_pcie_host_ops;
+ pp->ops = pcie->drvdata->ops;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -120,17 +228,19 @@ static int ls_add_pcie_port(struct ls_pcie *pcie)
static int __init ls_pcie_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct ls_pcie *pcie;
struct resource *dbi_base;
- u32 index[2];
int ret;
+ match = of_match_device(ls_pcie_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
-
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
if (IS_ERR(pcie->dbi)) {
@@ -138,20 +248,13 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->dbi);
}
- pcie->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "fsl,pcie-scfg");
- if (IS_ERR(pcie->scfg)) {
- dev_err(&pdev->dev, "No syscfg phandle specified\n");
- return PTR_ERR(pcie->scfg);
- }
+ pcie->drvdata = match->data;
+ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "fsl,pcie-scfg", index, 2);
- if (ret)
- return ret;
- pcie->index = index[1];
+ if (!ls_pcie_is_bridge(pcie))
+ return -ENODEV;
- ret = ls_add_pcie_port(pcie);
+ ret = ls_add_pcie_port(&pcie->pp, pdev);
if (ret < 0)
return ret;
@@ -160,12 +263,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1021a-pcie" },
- { },
-};
-MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
-
static struct platform_driver ls_pcie_driver = {
.driver = {
.name = "layerscape-pcie",
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 67ec5e1c99db..53b79c5f0559 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -30,6 +30,7 @@
#define PCIE_DEV_REV_OFF 0x0008
#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
+#define PCIE_CAP_PCIEXP 0x0060
#define PCIE_HEADER_LOG_4_OFF 0x0128
#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
@@ -57,14 +58,35 @@
#define PCIE_STAT_BUS 0xff00
#define PCIE_STAT_DEV 0x1f0000
#define PCIE_STAT_LINK_DOWN BIT(0)
+#define PCIE_RC_RTSTA 0x1a14
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
+enum {
+ PCISWCAP = PCI_BRIDGE_CONTROL + 2,
+ PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID,
+ PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP,
+ PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL,
+ PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP,
+ PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL,
+ PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP,
+ PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL,
+ PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL,
+ PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA,
+ PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2,
+ PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2,
+ PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2,
+ PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2,
+ PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2,
+ PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2,
+};
+
/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
u16 vendor;
u16 device;
u16 command;
+ u16 status;
u16 class;
u8 interface;
u8 revision;
@@ -84,13 +106,15 @@ struct mvebu_sw_pci_bridge {
u16 memlimit;
u16 iobaseupper;
u16 iolimitupper;
- u8 cappointer;
- u8 reserved1;
- u16 reserved2;
u32 romaddr;
u8 intline;
u8 intpin;
u16 bridgectrl;
+
+ /* PCI express capability */
+ u32 pcie_sltcap;
+ u16 pcie_devctl;
+ u16 pcie_rtctl;
};
struct mvebu_pcie_port;
@@ -119,8 +143,7 @@ struct mvebu_pcie_port {
unsigned int io_target;
unsigned int io_attr;
struct clk *clk;
- int reset_gpio;
- int reset_active_low;
+ struct gpio_desc *reset_gpio;
char *reset_name;
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
@@ -254,15 +277,22 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
+ void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
+
mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
PCIE_CONF_ADDR_OFF);
- *val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
-
- if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
- else if (size == 2)
- *val = (*val >> (8 * (where & 3))) & 0xffff;
+ switch (size) {
+ case 1:
+ *val = readb_relaxed(conf_data + (where & 3));
+ break;
+ case 2:
+ *val = readw_relaxed(conf_data + (where & 2));
+ break;
+ case 4:
+ *val = readl_relaxed(conf_data);
+ break;
+ }
return PCIBIOS_SUCCESSFUL;
}
@@ -271,22 +301,24 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
- u32 _val, shift = 8 * (where & 3);
+ void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
PCIE_CONF_ADDR_OFF);
- _val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
- if (size == 4)
- _val = val;
- else if (size == 2)
- _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift);
- else if (size == 1)
- _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift);
- else
+ switch (size) {
+ case 1:
+ writeb(val, conf_data + (where & 3));
+ break;
+ case 2:
+ writew(val, conf_data + (where & 2));
+ break;
+ case 4:
+ writel(val, conf_data);
+ break;
+ default:
return PCIBIOS_BAD_REGISTER_NUMBER;
-
- mvebu_writel(port, _val, PCIE_CONF_DATA_OFF);
+ }
return PCIBIOS_SUCCESSFUL;
}
@@ -443,6 +475,9 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
/* We support 32 bits I/O addressing */
bridge->iobase = PCI_IO_RANGE_TYPE_32;
bridge->iolimit = PCI_IO_RANGE_TYPE_32;
+
+ /* Add capabilities */
+ bridge->status = PCI_STATUS_CAP_LIST;
}
/*
@@ -460,7 +495,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
break;
case PCI_COMMAND:
- *value = bridge->command;
+ *value = bridge->command | bridge->status << 16;
break;
case PCI_CLASS_REVISION:
@@ -505,6 +540,10 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
*value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
break;
+ case PCI_CAPABILITY_LIST:
+ *value = PCISWCAP;
+ break;
+
case PCI_ROM_ADDRESS1:
*value = 0;
break;
@@ -514,9 +553,67 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
*value = 0;
break;
+ case PCISWCAP_EXP_LIST_ID:
+ /* Set PCIe v2, root port, slot support */
+ *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
+ break;
+
+ case PCISWCAP_EXP_DEVCAP:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
+ break;
+
+ case PCISWCAP_EXP_DEVCTL:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
+ ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+ *value |= bridge->pcie_devctl;
+ break;
+
+ case PCISWCAP_EXP_LNKCAP:
+ /*
+ * PCIe requires the clock power management capability to be
+ * hard-wired to zero for downstream ports
+ */
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
+ ~PCI_EXP_LNKCAP_CLKPM;
+ break;
+
+ case PCISWCAP_EXP_LNKCTL:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ break;
+
+ case PCISWCAP_EXP_SLTCAP:
+ *value = bridge->pcie_sltcap;
+ break;
+
+ case PCISWCAP_EXP_SLTCTL:
+ *value = PCI_EXP_SLTSTA_PDS << 16;
+ break;
+
+ case PCISWCAP_EXP_RTCTL:
+ *value = bridge->pcie_rtctl;
+ break;
+
+ case PCISWCAP_EXP_RTSTA:
+ *value = mvebu_readl(port, PCIE_RC_RTSTA);
+ break;
+
+ /* PCIe requires the v2 fields to be hard-wired to zero */
+ case PCISWCAP_EXP_DEVCAP2:
+ case PCISWCAP_EXP_DEVCTL2:
+ case PCISWCAP_EXP_LNKCAP2:
+ case PCISWCAP_EXP_LNKCTL2:
+ case PCISWCAP_EXP_SLTCAP2:
+ case PCISWCAP_EXP_SLTCTL2:
default:
- *value = 0xffffffff;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ /*
+ * PCI defines configuration read accesses to reserved or
+ * unimplemented registers to read as zero and complete
+ * normally.
+ */
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
}
if (size == 2)
@@ -601,6 +698,51 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
break;
+ case PCISWCAP_EXP_DEVCTL:
+ /*
+ * Armada370 data says these bits must always
+ * be zero when in root complex mode.
+ */
+ value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+
+ /*
+ * If the mask is 0xffff0000, then we only want to write
+ * the device control register, rather than clearing the
+ * RW1C bits in the device status register. Mask out the
+ * status register bits.
+ */
+ if (mask == 0xffff0000)
+ value &= 0xffff;
+
+ mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+ break;
+
+ case PCISWCAP_EXP_LNKCTL:
+ /*
+ * If we don't support CLKREQ, we must ensure that the
+ * CLKREQ enable bit always reads zero. Since we haven't
+ * had this capability, and it's dependent on board wiring,
+ * disable it for the time being.
+ */
+ value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+
+ /*
+ * If the mask is 0xffff0000, then we only want to write
+ * the link control register, rather than clearing the
+ * RW1C bits in the link status register. Mask out the
+ * status register bits.
+ */
+ if (mask == 0xffff0000)
+ value &= 0xffff;
+
+ mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ break;
+
+ case PCISWCAP_EXP_RTSTA:
+ mvebu_writel(port, value, PCIE_RC_RTSTA);
+ break;
+
default:
break;
}
@@ -652,17 +794,6 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
- /*
- * On the secondary bus, we don't want to expose any other
- * device than the device physically connected in the PCIe
- * slot, visible in slot 0. In slot 1, there's a special
- * Marvell device that only makes sense when the Armada is
- * used as a PCIe endpoint.
- */
- if (bus->number == port->bridge.secondary_bus &&
- PCI_SLOT(devfn) != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
/* Access the real PCIe interface */
ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
where, size, val);
@@ -693,19 +824,6 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
return PCIBIOS_DEVICE_NOT_FOUND;
}
- /*
- * On the secondary bus, we don't want to expose any other
- * device than the device physically connected in the PCIe
- * slot, visible in slot 0. In slot 1, there's a special
- * Marvell device that only makes sense when the Armada is
- * used as a PCIe endpoint.
- */
- if (bus->number == port->bridge.secondary_bus &&
- PCI_SLOT(devfn) != 0) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
/* Access the real PCIe interface */
ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
where, size, val);
@@ -914,12 +1032,167 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
+static void mvebu_pcie_port_clk_put(void *data)
+{
+ struct mvebu_pcie_port *port = data;
+
+ clk_put(port->clk);
+}
+
+static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
+ struct mvebu_pcie_port *port, struct device_node *child)
+{
+ struct device *dev = &pcie->pdev->dev;
+ enum of_gpio_flags flags;
+ int reset_gpio, ret;
+
+ port->pcie = pcie;
+
+ if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
+ dev_warn(dev, "ignoring %s, missing pcie-port property\n",
+ of_node_full_name(child));
+ goto skip;
+ }
+
+ if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
+ port->lane = 0;
+
+ port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
+ port->lane);
+ if (!port->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ port->devfn = of_pci_get_devfn(child);
+ if (port->devfn < 0)
+ goto skip;
+
+ ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
+ &port->mem_target, &port->mem_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
+ port->name);
+ goto skip;
+ }
+
+ if (resource_size(&pcie->io) != 0) {
+ mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
+ &port->io_target, &port->io_attr);
+ } else {
+ port->io_target = -1;
+ port->io_attr = -1;
+ }
+
+ reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
+ if (reset_gpio == -EPROBE_DEFER) {
+ ret = reset_gpio;
+ goto err;
+ }
+
+ if (gpio_is_valid(reset_gpio)) {
+ unsigned long gpio_flags;
+
+ port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
+ port->name);
+ if (!port->reset_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (flags & OF_GPIO_ACTIVE_LOW) {
+ dev_info(dev, "%s: reset gpio is active low\n",
+ of_node_full_name(child));
+ gpio_flags = GPIOF_ACTIVE_LOW |
+ GPIOF_OUT_INIT_LOW;
+ } else {
+ gpio_flags = GPIOF_OUT_INIT_HIGH;
+ }
+
+ ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
+ port->reset_name);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto err;
+ goto skip;
+ }
+
+ port->reset_gpio = gpio_to_desc(reset_gpio);
+ }
+
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(dev, "%s: cannot get clock\n", port->name);
+ goto skip;
+ }
+
+ ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
+ if (ret < 0) {
+ clk_put(port->clk);
+ goto err;
+ }
+
+ return 1;
+
+skip:
+ ret = 0;
+
+ /* In the case of skipping, we need to free these */
+ devm_kfree(dev, port->reset_name);
+ port->reset_name = NULL;
+ devm_kfree(dev, port->name);
+ port->name = NULL;
+
+err:
+ return ret;
+}
+
+/*
+ * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
+ * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
+ * of the PCI Express Card Electromechanical Specification, 1.1.
+ */
+static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
+{
+ int ret;
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret < 0)
+ return ret;
+
+ if (port->reset_gpio) {
+ u32 reset_udelay = 20000;
+
+ of_property_read_u32(port->dn, "reset-delay-us",
+ &reset_udelay);
+
+ udelay(100);
+
+ gpiod_set_value_cansleep(port->reset_gpio, 0);
+ msleep(reset_udelay / 1000);
+ }
+
+ return 0;
+}
+
+/*
+ * Power down a PCIe port. Strictly, PCIe requires us to place the card
+ * in D3hot state before asserting PERST#.
+ */
+static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
+{
+ if (port->reset_gpio)
+ gpiod_set_value_cansleep(port->reset_gpio, 1);
+
+ clk_disable_unprepare(port->clk);
+}
+
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct mvebu_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
- int i, ret;
+ int num, i, ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
GFP_KERNEL);
@@ -955,112 +1228,52 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
return ret;
}
- i = 0;
- for_each_child_of_node(pdev->dev.of_node, child) {
- if (!of_device_is_available(child))
- continue;
- i++;
- }
+ num = of_get_available_child_count(pdev->dev.of_node);
- pcie->ports = devm_kzalloc(&pdev->dev, i *
- sizeof(struct mvebu_pcie_port),
+ pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
GFP_KERNEL);
if (!pcie->ports)
return -ENOMEM;
i = 0;
- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
- enum of_gpio_flags flags;
-
- if (!of_device_is_available(child))
- continue;
- port->pcie = pcie;
-
- if (of_property_read_u32(child, "marvell,pcie-port",
- &port->port)) {
- dev_warn(&pdev->dev,
- "ignoring PCIe DT node, missing pcie-port property\n");
- continue;
- }
-
- if (of_property_read_u32(child, "marvell,pcie-lane",
- &port->lane))
- port->lane = 0;
-
- port->name = kasprintf(GFP_KERNEL, "pcie%d.%d",
- port->port, port->lane);
-
- port->devfn = of_pci_get_devfn(child);
- if (port->devfn < 0)
- continue;
-
- ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_MEM,
- &port->mem_target, &port->mem_attr);
+ ret = mvebu_pcie_parse_port(pcie, port, child);
if (ret < 0) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for mem window\n",
- port->port, port->lane);
+ of_node_put(child);
+ return ret;
+ } else if (ret == 0) {
continue;
}
- if (resource_size(&pcie->io) != 0)
- mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
- &port->io_target, &port->io_attr);
- else {
- port->io_target = -1;
- port->io_attr = -1;
- }
+ port->dn = child;
+ i++;
+ }
+ pcie->nports = i;
- port->reset_gpio = of_get_named_gpio_flags(child,
- "reset-gpios", 0, &flags);
- if (gpio_is_valid(port->reset_gpio)) {
- u32 reset_udelay = 20000;
-
- port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
- port->reset_name = kasprintf(GFP_KERNEL,
- "pcie%d.%d-reset", port->port, port->lane);
- of_property_read_u32(child, "reset-delay-us",
- &reset_udelay);
-
- ret = devm_gpio_request_one(&pdev->dev,
- port->reset_gpio, GPIOF_DIR_OUT, port->reset_name);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- return ret;
- continue;
- }
-
- gpio_set_value(port->reset_gpio,
- (port->reset_active_low) ? 1 : 0);
- msleep(reset_udelay/1000);
- }
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
- port->clk = of_clk_get_by_name(child, NULL);
- if (IS_ERR(port->clk)) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
- port->port, port->lane);
+ child = port->dn;
+ if (!child)
continue;
- }
- ret = clk_prepare_enable(port->clk);
- if (ret)
+ ret = mvebu_pcie_powerup(port);
+ if (ret < 0)
continue;
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
- port->port, port->lane);
+ dev_err(&pdev->dev, "%s: cannot map registers\n",
+ port->name);
port->base = NULL;
- clk_disable_unprepare(port->clk);
+ mvebu_pcie_powerdown(port);
continue;
}
mvebu_pcie_set_local_dev_nr(port, 1);
-
- port->dn = child;
mvebu_sw_pci_bridge_init(port);
- i++;
}
pcie->nports = i;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 81df0c1fe063..3018ae52e092 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -382,8 +382,8 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
- pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
- L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
+ pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
phys_addr_t cs = pcie->cs->start;
struct tegra_pcie_bus *bus;
unsigned int i;
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index e491681daf22..a6456b578269 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -256,7 +256,7 @@ static int xgene_allocate_domains(struct xgene_msi *msi)
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(msi->node,
+ msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
&xgene_msi_domain_info,
msi->inner_domain);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 0236ab9d5720..ae00ce22d5a6 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -509,24 +509,6 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
return 0;
}
-static int xgene_pcie_msi_enable(struct pci_bus *bus)
-{
- struct device_node *msi_node;
-
- msi_node = of_parse_phandle(bus->dev.of_node,
- "msi-parent", 0);
- if (!msi_node)
- return -ENODEV;
-
- bus->msi = of_pci_find_msi_chip_by_node(msi_node);
- if (!bus->msi)
- return -ENODEV;
-
- of_node_put(msi_node);
- bus->msi->dev = &bus->dev;
- return 0;
-}
-
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
@@ -567,10 +549,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- if (xgene_pcie_msi_enable(bus))
- dev_info(port->dev, "failed to enable MSI\n");
-
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
diff --git a/drivers/pci/host/pcie-altera-msi.c b/drivers/pci/host/pcie-altera-msi.c
new file mode 100644
index 000000000000..99177f4ccde2
--- /dev/null
+++ b/drivers/pci/host/pcie-altera-msi.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MSI_STATUS 0x0
+#define MSI_ERROR 0x4
+#define MSI_INTMASK 0x8
+
+#define MAX_MSI_VECTORS 32
+
+struct altera_msi {
+ DECLARE_BITMAP(used, MAX_MSI_VECTORS);
+ struct mutex lock; /* protect "used" bitmap */
+ struct platform_device *pdev;
+ struct irq_domain *msi_domain;
+ struct irq_domain *inner_domain;
+ void __iomem *csr_base;
+ void __iomem *vector_base;
+ phys_addr_t vector_phy;
+ u32 num_of_vectors;
+ int irq;
+};
+
+static inline void msi_writel(struct altera_msi *msi, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, msi->csr_base + reg);
+}
+
+static inline u32 msi_readl(struct altera_msi *msi, const u32 reg)
+{
+ return readl_relaxed(msi->csr_base + reg);
+}
+
+static void altera_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_msi *msi;
+ unsigned long status;
+ u32 num_of_vectors;
+ u32 bit;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+ msi = irq_desc_get_handler_data(desc);
+ num_of_vectors = msi->num_of_vectors;
+
+ while ((status = msi_readl(msi, MSI_STATUS)) != 0) {
+ for_each_set_bit(bit, &status, msi->num_of_vectors) {
+ /* Dummy read from vector to clear the interrupt */
+ readl_relaxed(msi->vector_base + (bit * sizeof(u32)));
+
+ virq = irq_find_mapping(msi->inner_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(&msi->pdev->dev, "unexpected MSI\n");
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip altera_msi_irq_chip = {
+ .name = "Altera PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info altera_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &altera_msi_irq_chip,
+};
+
+static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct altera_msi *msi = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32));
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int altera_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip altera_msi_bottom_irq_chip = {
+ .name = "Altera MSI",
+ .irq_compose_msi_msg = altera_compose_msi_msg,
+ .irq_set_affinity = altera_msi_set_affinity,
+};
+
+static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct altera_msi *msi = domain->host_data;
+ unsigned long bit;
+ u32 mask;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->used, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+
+ mask = msi_readl(msi, MSI_INTMASK);
+ mask |= 1 << bit;
+ msi_writel(msi, mask, MSI_INTMASK);
+
+ return 0;
+}
+
+static void altera_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct altera_msi *msi = irq_data_get_irq_chip_data(d);
+ u32 mask;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->used)) {
+ dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ } else {
+ __clear_bit(d->hwirq, msi->used);
+ mask = msi_readl(msi, MSI_INTMASK);
+ mask &= ~(1 << d->hwirq);
+ msi_writel(msi, mask, MSI_INTMASK);
+ }
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = altera_irq_domain_alloc,
+ .free = altera_irq_domain_free,
+};
+
+static int altera_allocate_domains(struct altera_msi *msi)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain) {
+ dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &altera_msi_domain_info, msi->inner_domain);
+ if (!msi->msi_domain) {
+ dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void altera_free_domains(struct altera_msi *msi)
+{
+ irq_domain_remove(msi->msi_domain);
+ irq_domain_remove(msi->inner_domain);
+}
+
+static int altera_msi_remove(struct platform_device *pdev)
+{
+ struct altera_msi *msi = platform_get_drvdata(pdev);
+
+ msi_writel(msi, 0, MSI_INTMASK);
+ irq_set_chained_handler(msi->irq, NULL);
+ irq_set_handler_data(msi->irq, NULL);
+
+ altera_free_domains(msi);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static int altera_msi_probe(struct platform_device *pdev)
+{
+ struct altera_msi *msi;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ int ret;
+
+ msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi),
+ GFP_KERNEL);
+ if (!msi)
+ return -ENOMEM;
+
+ mutex_init(&msi->lock);
+ msi->pdev = pdev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
+ if (!res) {
+ dev_err(&pdev->dev, "no csr memory resource defined\n");
+ return -ENODEV;
+ }
+
+ msi->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi->csr_base)) {
+ dev_err(&pdev->dev, "failed to map csr memory\n");
+ return PTR_ERR(msi->csr_base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vector_slave");
+ if (!res) {
+ dev_err(&pdev->dev, "no vector_slave memory resource defined\n");
+ return -ENODEV;
+ }
+
+ msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi->vector_base)) {
+ dev_err(&pdev->dev, "failed to map vector_slave memory\n");
+ return PTR_ERR(msi->vector_base);
+ }
+
+ msi->vector_phy = res->start;
+
+ if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) {
+ dev_err(&pdev->dev, "failed to parse the number of vectors\n");
+ return -EINVAL;
+ }
+
+ ret = altera_allocate_domains(msi);
+ if (ret)
+ return ret;
+
+ msi->irq = platform_get_irq(pdev, 0);
+ if (msi->irq <= 0) {
+ dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi);
+ platform_set_drvdata(pdev, msi);
+
+ return 0;
+
+err:
+ altera_msi_remove(pdev);
+ return ret;
+}
+
+static const struct of_device_id altera_msi_of_match[] = {
+ { .compatible = "altr,msi-1.0", NULL },
+ { },
+};
+
+static struct platform_driver altera_msi_driver = {
+ .driver = {
+ .name = "altera-msi",
+ .of_match_table = altera_msi_of_match,
+ },
+ .probe = altera_msi_probe,
+ .remove = altera_msi_remove,
+};
+
+static int __init altera_msi_init(void)
+{
+ return platform_driver_register(&altera_msi_driver);
+}
+subsys_initcall(altera_msi_init);
+
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_DESCRIPTION("Altera PCIe MSI support");
+MODULE_LICENSE("GPL v2");
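
The new Altera MSI driver above encodes the vector number into both halves of the MSI message: altera_compose_msi_msg() points the endpoint at vector_phy + hwirq * 4 and uses hwirq as the data payload, so a set bit n in MSI_STATUS maps straight back to hwirq n in the inner IRQ domain, and the dummy read of the vector slot in altera_msi_isr() acknowledges it. A condensed restatement of that mapping, using the struct altera_msi fields defined above:

	/* For MSI vector 'hwirq' (0 .. num_of_vectors - 1): */
	msg->address_lo = lower_32_bits(msi->vector_phy + hwirq * sizeof(u32));
	msg->address_hi = upper_32_bits(msi->vector_phy + hwirq * sizeof(u32));
	msg->data       = hwirq;	/* value the endpoint writes to that address */
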
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
new file mode 100644
index 000000000000..e5dda38bdde5
--- /dev/null
+++ b/drivers/pci/host/pcie-altera.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RP_TX_REG0 0x2000
+#define RP_TX_REG1 0x2004
+#define RP_TX_CNTRL 0x2008
+#define RP_TX_EOP 0x2
+#define RP_TX_SOP 0x1
+#define RP_RXCPL_STATUS 0x2010
+#define RP_RXCPL_EOP 0x2
+#define RP_RXCPL_SOP 0x1
+#define RP_RXCPL_REG0 0x2014
+#define RP_RXCPL_REG1 0x2018
+#define P2A_INT_STATUS 0x3060
+#define P2A_INT_STS_ALL 0xf
+#define P2A_INT_ENABLE 0x3070
+#define P2A_INT_ENA_ALL 0xf
+#define RP_LTSSM 0x3c64
+#define LTSSM_L0 0xf
+
+/* TLP configuration type 0 and 1 */
+#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
+#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
+#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */
+#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */
+#define TLP_PAYLOAD_SIZE 0x01
+#define TLP_READ_TAG 0x1d
+#define TLP_WRITE_TAG 0x10
+#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be))
+#define TLP_CFG_DW2(bus, devfn, offset) \
+ (((bus) << 24) | ((devfn) << 16) | (offset))
+#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
+#define TLP_HDR_SIZE 3
+#define TLP_LOOP 500
+
+#define INTX_NUM 4
+
+#define DWORD_MASK 3
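+
+/*
+ * Worked example of the header macros above, for a hypothetical type-1
+ * config read: root bus 0 reading dword 0x10 of bus 1, devfn 0, with all
+ * byte enables set:
+ *
+ *   headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1)          = 0x05000001
+ *   headers[1] = TLP_CFG_DW1(TLP_REQ_ID(0, 0), 0x1d, 0xf) = 0x00001d0f
+ *   headers[2] = TLP_CFG_DW2(1, 0, 0x10)                  = 0x01000010
+ *
+ * tlp_cfg_dword_read()/tlp_cfg_dword_write() below assemble exactly these
+ * three dwords before pushing them through RP_TX_REG0/RP_TX_REG1.
+ */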
+
+struct altera_pcie {
+ struct platform_device *pdev;
+ void __iomem *cra_base;
+ int irq;
+ u8 root_bus_nr;
+ struct irq_domain *irq_domain;
+ struct resource bus_range;
+ struct list_head resources;
+};
+
+struct tlp_rp_regpair_t {
+ u32 ctrl;
+ u32 reg0;
+ u32 reg1;
+};
+
+static void altera_pcie_retrain(struct pci_dev *dev)
+{
+ u16 linkcap, linkstat;
+
+ /*
+ * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
+ * current speed is 2.5 GB/s.
+ */
+ pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);
+
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
+ pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_RL);
+}
+DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
+
+/*
+ * Altera PCIe port uses BAR0 of RC's configuration space as the translation
+ * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
+ * using these registers, so it can be reached by DMA from EP devices.
+ * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
+ * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
+ * should be hidden during enumeration to avoid the sizing and resource
+ * allocation by PCIe core.
+ */
+static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && (devfn == 0) &&
+ (offset == PCI_BASE_ADDRESS_0))
+ return true;
+
+ return false;
+}
+
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->cra_base + reg);
+}
+
+static void tlp_write_tx(struct altera_pcie *pcie,
+ struct tlp_rp_regpair_t *tlp_rp_regdata)
+{
+ cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0);
+ cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1);
+ cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
+}
+
+static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+{
+ return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0);
+}
+
+static bool altera_pcie_valid_config(struct altera_pcie *pcie,
+ struct pci_bus *bus, int dev)
+{
+ /* If there is no link, then there is no device */
+ if (bus->number != pcie->root_bus_nr) {
+ if (!altera_pcie_link_is_up(pcie))
+ return false;
+ }
+
+ /* access only one slot on each root port */
+ if (bus->number == pcie->root_bus_nr && dev > 0)
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly attached
+ * to root port, root port can only attach to one downstream port.
+ */
+ if (bus->primary == pcie->root_bus_nr && dev > 0)
+ return false;
+
+ return true;
+}
+
+static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
+{
+ u8 loop;
+ bool sop = 0;
+ u32 ctrl;
+ u32 reg0, reg1;
+
+ /*
+ * Minimum 2 loops to read TLP headers and 1 loop to read data
+ * payload.
+ */
+ for (loop = 0; loop < TLP_LOOP; loop++) {
+ ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
+ if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
+ reg0 = cra_readl(pcie, RP_RXCPL_REG0);
+ reg1 = cra_readl(pcie, RP_RXCPL_REG1);
+
+ if (ctrl & RP_RXCPL_SOP)
+ sop = true;
+
+ if (ctrl & RP_RXCPL_EOP) {
+ if (value)
+ *value = reg0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+ udelay(5);
+ }
+
+ return -ENOENT;
+}
+
+static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
+ u32 data, bool align)
+{
+ struct tlp_rp_regpair_t tlp_rp_regdata;
+
+ tlp_rp_regdata.reg0 = headers[0];
+ tlp_rp_regdata.reg1 = headers[1];
+ tlp_rp_regdata.ctrl = RP_TX_SOP;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+
+ if (align) {
+ tlp_rp_regdata.reg0 = headers[2];
+ tlp_rp_regdata.reg1 = 0;
+ tlp_rp_regdata.ctrl = 0;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+
+ tlp_rp_regdata.reg0 = data;
+ tlp_rp_regdata.reg1 = 0;
+ } else {
+ tlp_rp_regdata.reg0 = headers[2];
+ tlp_rp_regdata.reg1 = data;
+ }
+
+ tlp_rp_regdata.ctrl = RP_TX_EOP;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+}
+
+static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, u32 *value)
+{
+ u32 headers[TLP_HDR_SIZE];
+
+ if (bus == pcie->root_bus_nr)
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
+ else
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
+
+ headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn),
+ TLP_READ_TAG, byte_en);
+ headers[2] = TLP_CFG_DW2(bus, devfn, where);
+
+ tlp_write_packet(pcie, headers, 0, false);
+
+ return tlp_read_packet(pcie, value);
+}
+
+static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, u32 value)
+{
+ u32 headers[TLP_HDR_SIZE];
+ int ret;
+
+ if (bus == pcie->root_bus_nr)
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
+ else
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
+
+ headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn),
+ TLP_WRITE_TAG, byte_en);
+ headers[2] = TLP_CFG_DW2(bus, devfn, where);
+
+ /* check alignment to Qword */
+ if ((where & 0x7) == 0)
+ tlp_write_packet(pcie, headers, value, true);
+ else
+ tlp_write_packet(pcie, headers, value, false);
+
+ ret = tlp_read_packet(pcie, NULL);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on root port
+ * and update local copy of root bus number accordingly.
+ */
+ if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
+ pcie->root_bus_nr = (u8)(value);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+ int ret;
+ u32 data;
+ u8 byte_en;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+ *value = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ switch (size) {
+ case 1:
+ byte_en = 1 << (where & 3);
+ break;
+ case 2:
+ byte_en = 3 << (where & 3);
+ break;
+ default:
+ byte_en = 0xf;
+ break;
+ }
+
+ ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
+ (where & ~DWORD_MASK), byte_en, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ switch (size) {
+ case 1:
+ *value = (data >> (8 * (where & 0x3))) & 0xff;
+ break;
+ case 2:
+ *value = (data >> (8 * (where & 0x2))) & 0xffff;
+ break;
+ default:
+ *value = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+ u32 data32;
+ u32 shift = 8 * (where & 3);
+ u8 byte_en;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ data32 = (value & 0xff) << shift;
+ byte_en = 1 << (where & 3);
+ break;
+ case 2:
+ data32 = (value & 0xffff) << shift;
+ byte_en = 3 << (where & 3);
+ break;
+ default:
+ data32 = value;
+ byte_en = 0xf;
+ break;
+ }
+
+ return tlp_cfg_dword_write(pcie, bus->number, devfn,
+ (where & ~DWORD_MASK), byte_en, data32);
+}
+
+static struct pci_ops altera_pcie_ops = {
+ .read = altera_pcie_cfg_read,
+ .write = altera_pcie_cfg_write,
+};
+
+static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = altera_pcie_intx_map,
+};
+
+static void altera_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ unsigned long status;
+ u32 bit;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+
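+ /*
+ * Status bit n corresponds to hwirq n + 1 in the INTx domain, hence
+ * the "bit + 1" passed to irq_find_mapping() below.
+ */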
+ while ((status = cra_readl(pcie, P2A_INT_STATUS)
+ & P2A_INT_STS_ALL) != 0) {
+ for_each_set_bit(bit, &status, INTX_NUM) {
+ /* clear interrupts */
+ cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
+
+ virq = irq_find_mapping(pcie->irq_domain, bit + 1);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(&pcie->pdev->dev,
+ "unexpected IRQ, INT%d\n", bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie)
+{
+ pci_free_resource_list(&pcie->resources);
+}
+
+static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
+{
+ int err, res_valid = 0;
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource_entry *win;
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
+ NULL);
+ if (err)
+ return err;
+
+ resource_list_for_each_entry(win, &pcie->resources) {
+ struct resource *parent, *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+ break;
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ goto out_release_res;
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ altera_pcie_release_of_pci_ranges(pcie);
+ return err;
+}
+
+static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ /* Setup INTx */
+ pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM,
+ &intx_domain_ops, pcie);
+ if (!pcie->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int altera_pcie_parse_dt(struct altera_pcie *pcie)
+{
+ struct resource *cra;
+ struct platform_device *pdev = pcie->pdev;
+
+ cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
+ if (!cra) {
+ dev_err(&pdev->dev, "no Cra memory resource defined\n");
+ return -ENODEV;
+ }
+
+ pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
+ if (IS_ERR(pcie->cra_base)) {
+ dev_err(&pdev->dev, "failed to map cra memory\n");
+ return PTR_ERR(pcie->cra_base);
+ }
+
+ /* setup IRQ */
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+
+ return 0;
+}
+
+static int altera_pcie_probe(struct platform_device *pdev)
+{
+ struct altera_pcie *pcie;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ int ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+
+ ret = altera_pcie_parse_dt(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Parsing DT failed\n");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ ret = altera_pcie_parse_request_of_pci_ranges(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed add resources\n");
+ return ret;
+ }
+
+ ret = altera_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
+ return ret;
+ }
+
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+
+ bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
+ pcie, &pcie->resources);
+ if (!bus)
+ return -ENOMEM;
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_assign_unassigned_bus_resources(bus);
+
+ /* Configure PCI Express setting. */
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ platform_set_drvdata(pdev, pcie);
+ return ret;
+}
+
+static const struct of_device_id altera_pcie_of_match[] = {
+ { .compatible = "altr,pcie-root-port-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
+
+static struct platform_driver altera_pcie_driver = {
+ .probe = altera_pcie_probe,
+ .driver = {
+ .name = "altera-pcie",
+ .of_match_table = altera_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int altera_pcie_init(void)
+{
+ return platform_driver_register(&altera_pcie_driver);
+}
+module_init(altera_pcie_init);
+
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_DESCRIPTION("Altera PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 52aa6e34002b..540f077c37ea 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -35,7 +35,7 @@
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
-#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
+#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
@@ -69,39 +69,40 @@
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
-static struct hw_pci dw_pci;
+static struct pci_ops dw_pcie_ops;
-static unsigned long global_io_offset;
-
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
- BUG_ON(!sys->private_data);
-
- return sys->private_data;
-}
-
-int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
-{
- *val = readl(addr);
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
- if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
+ if (size == 4)
+ *val = readl(addr);
else if (size == 2)
- *val = (*val >> (8 * (where & 3))) & 0xffff;
- else if (size != 4)
+ *val = readw(addr);
+ else if (size == 1)
+ *val = readb(addr);
+ else {
+ *val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
return PCIBIOS_SUCCESSFUL;
}
-int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
if (size == 4)
writel(val, addr);
else if (size == 2)
- writew(val, addr + (where & 2));
+ writew(val, addr);
else if (size == 1)
- writeb(val, addr + (where & 3));
+ writeb(val, addr);
else
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -132,8 +133,7 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
if (pp->ops->rd_own_conf)
ret = pp->ops->rd_own_conf(pp, where, size, val);
else
- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
- size, val);
+ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
return ret;
}
@@ -146,8 +146,7 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
if (pp->ops->wr_own_conf)
ret = pp->ops->wr_own_conf(pp, where, size, val);
else
- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
- size, val);
+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
return ret;
}
@@ -205,12 +204,16 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
void dw_pcie_msi_init(struct pcie_port *pp)
{
+ u64 msi_target;
+
pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+ msi_target = virt_to_phys((void *)pp->msi_data);
/* program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
- virt_to_phys((void *)pp->msi_data));
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+ (u32)(msi_target & 0xffffffff));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
+ (u32)(msi_target >> 32 & 0xffffffff));
}
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@@ -255,7 +258,7 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int irq, pos0, i;
- struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc));
+ struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);
pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
order_base_2(no_irqs));
@@ -286,6 +289,9 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
}
*pos = pos0;
+ desc->nvec_used = no_irqs;
+ desc->msi_attrib.multiple = order_base_2(no_irqs);
+
return irq;
no_valid_irq:
@@ -293,12 +299,32 @@ no_valid_irq:
return -ENOSPC;
}
+static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
+{
+ struct msi_msg msg;
+ u64 msi_target;
+
+ if (pp->ops->get_msi_addr)
+ msi_target = pp->ops->get_msi_addr(pp);
+ else
+ msi_target = virt_to_phys((void *)pp->msi_data);
+
+ msg.address_lo = (u32)(msi_target & 0xffffffff);
+ msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
+
+ if (pp->ops->get_msi_data)
+ msg.data = pp->ops->get_msi_data(pp, pos);
+ else
+ msg.data = pos;
+
+ pci_write_msi_msg(irq, &msg);
+}
+
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
struct msi_desc *desc)
{
int irq, pos;
- struct msi_msg msg;
- struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+ struct pcie_port *pp = pdev->bus->sysdata;
if (desc->msi_attrib.is_msix)
return -EINVAL;
@@ -307,33 +333,50 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
if (irq < 0)
return irq;
- if (pp->ops->get_msi_addr)
- msg.address_lo = pp->ops->get_msi_addr(pp);
- else
- msg.address_lo = virt_to_phys((void *)pp->msi_data);
- msg.address_hi = 0x0;
+ dw_msi_setup_msg(pp, irq, pos);
- if (pp->ops->get_msi_data)
- msg.data = pp->ops->get_msi_data(pp, pos);
- else
- msg.data = pos;
+ return 0;
+}
- pci_write_msi_msg(irq, &msg);
+static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
+ int nvec, int type)
+{
+#ifdef CONFIG_PCI_MSI
+ int irq, pos;
+ struct msi_desc *desc;
+ struct pcie_port *pp = pdev->bus->sysdata;
+
+ /* MSI-X interrupts are not supported */
+ if (type == PCI_CAP_ID_MSIX)
+ return -EINVAL;
+
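+ /*
+ * A device using multi MSI has a single msi_desc describing all of
+ * its vectors, so only the first (and only) entry is looked up here.
+ */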
+ WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+ desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+ irq = assign_irq(nvec, desc, &pos);
+ if (irq < 0)
+ return irq;
+
+ dw_msi_setup_msg(pp, irq, pos);
return 0;
+#else
+ return -EINVAL;
+#endif
}
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
struct msi_desc *msi = irq_data_get_msi_desc(data);
- struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
clear_irq_range(pp, irq, 1, data->hwirq);
}
static struct msi_controller dw_pcie_msi_chip = {
.setup_irq = dw_msi_setup_irq,
+ .setup_irqs = dw_msi_setup_irqs,
.teardown_irq = dw_msi_teardown_irq,
};
@@ -362,18 +405,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
struct platform_device *pdev = to_platform_device(pp->dev);
- struct of_pci_range range;
- struct of_pci_range_parser parser;
+ struct pci_bus *bus, *child;
struct resource *cfg_res;
- u32 val, na, ns;
- const __be32 *addrp;
- int i, index, ret;
-
- /* Find the address cell size and the number of cells in order to get
- * the untranslated address.
- */
- of_property_read_u32(np, "#address-cells", &na);
- ns = of_n_size_cells(np);
+ u32 val;
+ int i, ret;
+ LIST_HEAD(res);
+ struct resource_entry *win;
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
@@ -381,88 +418,61 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_size = resource_size(cfg_res)/2;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
-
- /* Find the untranslated configuration space address */
- index = of_property_match_string(np, "reg-names", "config");
- addrp = of_get_address(np, index, NULL, NULL);
- pp->cfg0_mod_base = of_read_number(addrp, ns);
- pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
dev_err(pp->dev, "missing *config* reg space\n");
}
- if (of_pci_range_parser_init(&parser, np)) {
- dev_err(pp->dev, "missing ranges property\n");
- return -EINVAL;
- }
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
+ if (ret)
+ return ret;
/* Get the I/O and memory ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
-
- if (restype == IORESOURCE_IO) {
- of_pci_range_to_resource(&range, np, &pp->io);
- pp->io.name = "I/O";
- pp->io.start = max_t(resource_size_t,
- PCIBIOS_MIN_IO,
- range.pci_addr + global_io_offset);
- pp->io.end = min_t(resource_size_t,
- IO_SPACE_LIMIT,
- range.pci_addr + range.size
- + global_io_offset - 1);
- pp->io_size = resource_size(&pp->io);
- pp->io_bus_addr = range.pci_addr;
- pp->io_base = range.cpu_addr;
-
- /* Find the untranslated IO space address */
- pp->io_mod_base = of_read_number(parser.range -
- parser.np + na, ns);
- }
- if (restype == IORESOURCE_MEM) {
- of_pci_range_to_resource(&range, np, &pp->mem);
- pp->mem.name = "MEM";
- pp->mem_size = resource_size(&pp->mem);
- pp->mem_bus_addr = range.pci_addr;
-
- /* Find the untranslated MEM space address */
- pp->mem_mod_base = of_read_number(parser.range -
- parser.np + na, ns);
- }
- if (restype == 0) {
- of_pci_range_to_resource(&range, np, &pp->cfg);
- pp->cfg0_size = resource_size(&pp->cfg)/2;
- pp->cfg1_size = resource_size(&pp->cfg)/2;
- pp->cfg0_base = pp->cfg.start;
- pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
-
- /* Find the untranslated configuration space address */
- pp->cfg0_mod_base = of_read_number(parser.range -
- parser.np + na, ns);
- pp->cfg1_mod_base = pp->cfg0_mod_base +
- pp->cfg0_size;
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ ret = pci_remap_iospace(pp->io, pp->io_base);
+ if (ret) {
+ dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
+ ret, pp->io);
+ continue;
+ }
+ pp->io_base = pp->io->start;
+ break;
+ case IORESOURCE_MEM:
+ pp->mem = win->res;
+ pp->mem->name = "MEM";
+ pp->mem_size = resource_size(pp->mem);
+ pp->mem_bus_addr = pp->mem->start - win->offset;
+ break;
+ case 0:
+ pp->cfg = win->res;
+ pp->cfg0_size = resource_size(pp->cfg)/2;
+ pp->cfg1_size = resource_size(pp->cfg)/2;
+ pp->cfg0_base = pp->cfg->start;
+ pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
+ break;
+ case IORESOURCE_BUS:
+ pp->busn = win->res;
+ break;
+ default:
+ continue;
}
}
- ret = of_pci_parse_bus_range(np, &pp->busn);
- if (ret < 0) {
- pp->busn.name = np->name;
- pp->busn.start = 0;
- pp->busn.end = 0xff;
- pp->busn.flags = IORESOURCE_BUS;
- dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
- ret, &pp->busn);
- }
-
if (!pp->dbi_base) {
- pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
- resource_size(&pp->cfg));
+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
+ resource_size(pp->cfg));
if (!pp->dbi_base) {
dev_err(pp->dev, "error with ioremap\n");
return -ENOMEM;
}
}
- pp->mem_base = pp->mem.start;
+ pp->mem_base = pp->mem->start;
if (!pp->va_cfg0_base) {
pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
@@ -482,10 +492,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
- dev_err(pp->dev, "Failed to parse the number of lanes\n");
- return -EINVAL;
- }
+ ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
+ if (ret)
+ pp->lanes = 0;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
@@ -511,7 +520,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (!pp->ops->rd_other_conf)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_MEM, pp->mem_mod_base,
+ PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
@@ -523,15 +532,35 @@ int dw_pcie_host_init(struct pcie_port *pp)
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-#ifdef CONFIG_PCI_MSI
- dw_pcie_msi_chip.dev = pp->dev;
+ pp->root_bus_nr = pp->busn->start;
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
+ &dw_pcie_ops, pp, &res,
+ &dw_pcie_msi_chip);
+ dw_pcie_msi_chip.dev = pp->dev;
+ } else
+ bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
+ pp, &res);
+ if (!bus)
+ return -ENOMEM;
+
+ if (pp->ops->scan_bus)
+ pp->ops->scan_bus(pp);
+
+#ifdef CONFIG_ARM
+ /* support old dtbs that incorrectly describe IRQs */
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif
- dw_pci.nr_controllers = 1;
- dw_pci.private_data = (void **)&pp;
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
- pci_common_init_dev(pp->dev, &dw_pci);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+ pci_bus_add_devices(bus);
return 0;
}
@@ -539,22 +568,21 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
int ret, type;
- u32 address, busdev, cfg_size;
+ u32 busdev, cfg_size;
u64 cpu_addr;
void __iomem *va_cfg_base;
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_mod_base;
+ cpu_addr = pp->cfg0_base;
cfg_size = pp->cfg0_size;
va_cfg_base = pp->va_cfg0_base;
} else {
type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_mod_base;
+ cpu_addr = pp->cfg1_base;
cfg_size = pp->cfg1_size;
va_cfg_base = pp->va_cfg1_base;
}
@@ -562,9 +590,9 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
type, cpu_addr,
busdev, cfg_size);
- ret = dw_pcie_cfg_read(va_cfg_base + address, where, size, val);
+ ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_IO, pp->io_mod_base,
+ PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
return ret;
@@ -574,22 +602,21 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
int ret, type;
- u32 address, busdev, cfg_size;
+ u32 busdev, cfg_size;
u64 cpu_addr;
void __iomem *va_cfg_base;
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_mod_base;
+ cpu_addr = pp->cfg0_base;
cfg_size = pp->cfg0_size;
va_cfg_base = pp->va_cfg0_base;
} else {
type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_mod_base;
+ cpu_addr = pp->cfg1_base;
cfg_size = pp->cfg1_size;
va_cfg_base = pp->va_cfg1_base;
}
@@ -597,9 +624,9 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
type, cpu_addr,
busdev, cfg_size);
- ret = dw_pcie_cfg_write(va_cfg_base + address, where, size, val);
+ ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_IO, pp->io_mod_base,
+ PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
return ret;
@@ -631,7 +658,7 @@ static int dw_pcie_valid_config(struct pcie_port *pp,
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
- struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ struct pcie_port *pp = bus->sysdata;
int ret;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
@@ -655,7 +682,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
- struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ struct pcie_port *pp = bus->sysdata;
int ret;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
@@ -679,69 +706,6 @@ static struct pci_ops dw_pcie_ops = {
.write = dw_pcie_wr_conf,
};
-static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
-{
- struct pcie_port *pp;
-
- pp = sys_to_pcie(sys);
-
- if (global_io_offset < SZ_1M && pp->io_size > 0) {
- sys->io_offset = global_io_offset - pp->io_bus_addr;
- pci_ioremap_io(global_io_offset, pp->io_base);
- global_io_offset += SZ_64K;
- pci_add_resource_offset(&sys->resources, &pp->io,
- sys->io_offset);
- }
-
- sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
- pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
- pci_add_resource(&sys->resources, &pp->busn);
-
- return 1;
-}
-
-static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
-{
- struct pci_bus *bus;
- struct pcie_port *pp = sys_to_pcie(sys);
-
- pp->root_bus_nr = sys->busnr;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- bus = pci_scan_root_bus_msi(pp->dev, sys->busnr, &dw_pcie_ops,
- sys, &sys->resources,
- &dw_pcie_msi_chip);
- else
- bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
- sys, &sys->resources);
-
- if (!bus)
- return NULL;
-
- if (bus && pp->ops->scan_bus)
- pp->ops->scan_bus(pp);
-
- return bus;
-}
-
-static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
- int irq;
-
- irq = of_irq_parse_and_map_pci(dev, slot, pin);
- if (!irq)
- irq = pp->irq;
-
- return irq;
-}
-
-static struct hw_pci dw_pci = {
- .setup = dw_pcie_setup,
- .scan = dw_pcie_scan_bus,
- .map_irq = dw_pcie_map_irq,
-};
-
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
@@ -764,6 +728,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
case 8:
val |= PORT_LINK_MODE_8_LANES;
break;
+ default:
+ dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
+ return;
}
dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index d0bbd276840d..2356d29e8527 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -27,25 +27,21 @@ struct pcie_port {
u8 root_bus_nr;
void __iomem *dbi_base;
u64 cfg0_base;
- u64 cfg0_mod_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
u64 cfg1_base;
- u64 cfg1_mod_base;
void __iomem *va_cfg1_base;
u32 cfg1_size;
- u64 io_base;
- u64 io_mod_base;
+ resource_size_t io_base;
phys_addr_t io_bus_addr;
u32 io_size;
u64 mem_base;
- u64 mem_mod_base;
phys_addr_t mem_bus_addr;
u32 mem_size;
- struct resource cfg;
- struct resource io;
- struct resource mem;
- struct resource busn;
+ struct resource *cfg;
+ struct resource *io;
+ struct resource *mem;
+ struct resource *busn;
int irq;
u32 lanes;
struct pcie_host_ops *ops;
@@ -70,14 +66,14 @@ struct pcie_host_ops {
void (*host_init)(struct pcie_port *pp);
void (*msi_set_irq)(struct pcie_port *pp, int irq);
void (*msi_clear_irq)(struct pcie_port *pp, int irq);
- u32 (*get_msi_addr)(struct pcie_port *pp);
+ phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
};
-int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
-int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
new file mode 100644
index 000000000000..35457ecd8e70
--- /dev/null
+++ b/drivers/pci/host/pcie-hisi.c
@@ -0,0 +1,198 @@
+/*
+ * PCIe host controller driver for HiSilicon Hip05 SoC
+ *
+ * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Author: Zhou Wang <wangzhou1@hisilicon.com>
+ * Dacai Zhu <zhudacai@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
+#define PCIE_LTSSM_LINKUP_STATE 0x11
+#define PCIE_LTSSM_STATE_MASK 0x3F
+
+#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp)
+
+struct hisi_pcie {
+ struct regmap *subctrl;
+ void __iomem *reg_base;
+ u32 port_id;
+ struct pcie_port pp;
+};
+
+static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
+ u32 val, u32 reg)
+{
+ writel(val, pcie->reg_base + reg);
+}
+
+static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
+{
+ return readl(pcie->reg_base + reg);
+}
+
+/* Hip05 PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ u32 reg;
+ u32 reg_val;
+ struct hisi_pcie *pcie = to_hisi_pcie(pp);
+ void *walker = &reg_val;
+
+ walker += (where & 0x3);
+ reg = where & ~0x3;
+ reg_val = hisi_pcie_apb_readl(pcie, reg);
+
+ if (size == 1)
+ *val = *(u8 __force *) walker;
+ else if (size == 2)
+ *val = *(u16 __force *) walker;
+ else if (size != 4)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Hip05 PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ u32 reg_val;
+ u32 reg;
+ struct hisi_pcie *pcie = to_hisi_pcie(pp);
+ void *walker = &reg_val;
+
+ walker += (where & 0x3);
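+ /*
+ * Sub-dword writes are emulated with a read-modify-write of the
+ * containing dword: e.g. a 1-byte write at offset 0x05 reads the
+ * dword at 0x04, patches byte 1 through the walker pointer and
+ * writes the whole dword back.
+ */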
+ reg = where & ~0x3;
+ if (size == 4)
+ hisi_pcie_apb_writel(pcie, val, reg);
+ else if (size == 2) {
+ reg_val = hisi_pcie_apb_readl(pcie, reg);
+ *(u16 __force *) walker = val;
+ hisi_pcie_apb_writel(pcie, reg_val, reg);
+ } else if (size == 1) {
+ reg_val = hisi_pcie_apb_readl(pcie, reg);
+ *(u8 __force *) walker = val;
+ hisi_pcie_apb_writel(pcie, reg_val, reg);
+ } else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int hisi_pcie_link_up(struct pcie_port *pp)
+{
+ u32 val;
+ struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
+
+ regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG +
+ 0x100 * hisi_pcie->port_id, &val);
+
+ return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
+}
+
+static struct pcie_host_ops hisi_pcie_host_ops = {
+ .rd_own_conf = hisi_pcie_cfg_read,
+ .wr_own_conf = hisi_pcie_cfg_write,
+ .link_up = hisi_pcie_link_up,
+};
+
+static int __init hisi_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+ u32 port_id;
+ struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
+
+ if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
+ dev_err(&pdev->dev, "failed to read port-id\n");
+ return -EINVAL;
+ }
+ if (port_id > 3) {
+ dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
+ return -EINVAL;
+ }
+ hisi_pcie->port_id = port_id;
+
+ pp->ops = &hisi_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init hisi_pcie_probe(struct platform_device *pdev)
+{
+ struct hisi_pcie *hisi_pcie;
+ struct pcie_port *pp;
+ struct resource *reg;
+ int ret;
+
+ hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
+ if (!hisi_pcie)
+ return -ENOMEM;
+
+ pp = &hisi_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ hisi_pcie->subctrl =
+ syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
+ if (IS_ERR(hisi_pcie->subctrl)) {
+ dev_err(pp->dev, "cannot get subctrl base\n");
+ return PTR_ERR(hisi_pcie->subctrl);
+ }
+
+ reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
+ hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
+ if (IS_ERR(hisi_pcie->reg_base)) {
+ dev_err(pp->dev, "cannot get rc_dbi base\n");
+ return PTR_ERR(hisi_pcie->reg_base);
+ }
+
+ hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
+
+ ret = hisi_add_pcie_port(pp, pdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, hisi_pcie);
+
+ dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+
+ return 0;
+}
+
+static const struct of_device_id hisi_pcie_of_match[] = {
+ {.compatible = "hisilicon,hip05-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hisi_pcie_of_match);
+
+static struct platform_driver hisi_pcie_driver = {
+ .probe = hisi_pcie_probe,
+ .driver = {
+ .name = "hisi-pcie",
+ .of_match_table = hisi_pcie_of_match,
+ },
+};
+
+module_platform_driver(hisi_pcie_driver);
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 9aedc8eb2c6e..c9550dc8b8ed 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -54,6 +54,33 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ if (of_property_read_bool(np, "brcm,pcie-ob")) {
+ u32 val;
+
+ ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
+ &val);
+ if (ret) {
+ dev_err(pcie->dev,
+ "missing brcm,pcie-ob-axi-offset property\n");
+ return ret;
+ }
+ pcie->ob.axi_offset = val;
+
+ ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
+ &val);
+ if (ret) {
+ dev_err(pcie->dev,
+ "missing brcm,pcie-ob-window-size property\n");
+ return ret;
+ }
+ pcie->ob.window_size = (resource_size_t)val * SZ_1M;
+
+ if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size"))
+ pcie->ob.set_oarr_size = true;
+
+ pcie->need_ob_cfg = true;
+ }
+
/* PHY use is optional */
pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index fe2efb141a9b..eac719af16aa 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
- * Copyright (C) 2015 Broadcom Corporatcommon ion
+ * Copyright (C) 2015 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -31,6 +31,8 @@
#include "pcie-iproc.h"
#define CLK_CONTROL_OFFSET 0x000
+#define EP_PERST_SOURCE_SELECT_SHIFT 2
+#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT)
#define EP_MODE_SURVIVE_PERST_SHIFT 1
#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
#define RC_PCIE_RST_OUTPUT_SHIFT 0
@@ -58,6 +60,24 @@
#define SYS_RC_INTX_EN 0x330
#define SYS_RC_INTX_MASK 0xf
+#define PCIE_LINK_STATUS_OFFSET 0xf0c
+#define PCIE_PHYLINKUP_SHIFT 3
+#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT)
+#define PCIE_DL_ACTIVE_SHIFT 2
+#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
+
+#define OARR_VALID_SHIFT 0
+#define OARR_VALID BIT(OARR_VALID_SHIFT)
+#define OARR_SIZE_CFG_SHIFT 1
+#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
+
+#define OARR_LO(window) (0xd20 + (window) * 8)
+#define OARR_HI(window) (0xd24 + (window) * 8)
+#define OMAP_LO(window) (0xd40 + (window) * 8)
+#define OMAP_HI(window) (0xd44 + (window) * 8)
+
+#define MAX_NUM_OB_WINDOWS 2
+
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
struct iproc_pcie *pcie;
@@ -119,23 +139,32 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
u32 val;
/*
- * Configure the PCIe controller as root complex and send a downstream
- * reset
+ * Select perst_b signal as reset source. Put the device into reset,
+ * and then bring it out of reset
*/
- val = EP_MODE_SURVIVE_PERST | RC_PCIE_RST_OUTPUT;
+ val = readl(pcie->base + CLK_CONTROL_OFFSET);
+ val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
+ ~RC_PCIE_RST_OUTPUT;
writel(val, pcie->base + CLK_CONTROL_OFFSET);
udelay(250);
- val &= ~EP_MODE_SURVIVE_PERST;
+
+ val |= RC_PCIE_RST_OUTPUT;
writel(val, pcie->base + CLK_CONTROL_OFFSET);
- msleep(250);
+ msleep(100);
}
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
u8 hdr_type;
- u32 link_ctrl;
+ u32 link_ctrl, class, val;
u16 pos, link_status;
- int link_is_active = 0;
+ bool link_is_active = false;
+
+ val = readl(pcie->base + PCIE_LINK_STATUS_OFFSET);
+ if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
+ dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
+ return -ENODEV;
+ }
/* make sure we are not in EP mode */
pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
@@ -145,14 +174,19 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
- pci_bus_write_config_word(bus, 0, PCI_CLASS_DEVICE,
- PCI_CLASS_BRIDGE_PCI);
+#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
+#define PCI_CLASS_BRIDGE_MASK 0xffff00
+#define PCI_CLASS_BRIDGE_SHIFT 8
+ pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class);
+ class &= ~PCI_CLASS_BRIDGE_MASK;
+ class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
+ pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
/* check link status to see if link is active */
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
- link_is_active = 1;
+ link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
@@ -176,7 +210,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
&link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
- link_is_active = 1;
+ link_is_active = true;
}
}
@@ -190,6 +224,101 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN);
}
+/**
+ * Some iProc SoCs require the SW to configure the outbound address mapping
+ *
+ * Outbound address translation:
+ *
+ * iproc_pcie_address = axi_address - axi_offset
+ * OARR = iproc_pcie_address
+ * OMAP = pci_addr
+ *
+ * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
+ */
+static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
+ u64 pci_addr, resource_size_t size)
+{
+ struct iproc_pcie_ob *ob = &pcie->ob;
+ unsigned i;
+ u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
+ u64 remainder;
+
+ if (size > max_size) {
+ dev_err(pcie->dev,
+ "res size 0x%pap exceeds max supported size 0x%llx\n",
+ &size, max_size);
+ return -EINVAL;
+ }
+
+ div64_u64_rem(size, ob->window_size, &remainder);
+ if (remainder) {
+ dev_err(pcie->dev,
+ "res size %pap needs to be multiple of window size %pap\n",
+ &size, &ob->window_size);
+ return -EINVAL;
+ }
+
+ if (axi_addr < ob->axi_offset) {
+ dev_err(pcie->dev,
+ "axi address %pap less than offset %pap\n",
+ &axi_addr, &ob->axi_offset);
+ return -EINVAL;
+ }
+
+ /*
+ * Translate the AXI address to the internal address used by the iProc
+ * PCIe core before programming the OARR
+ */
+ axi_addr -= ob->axi_offset;
+
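+ /*
+ * Program one OARR/OMAP pair per window_size chunk. For example,
+ * with a (hypothetical) 128 MiB window size, a 256 MiB region is
+ * split across both windows: window 0 maps the first 128 MiB and
+ * window 1 the remainder.
+ */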
+ for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
+ writel(lower_32_bits(axi_addr) | OARR_VALID |
+ (ob->set_oarr_size ? 1 : 0), pcie->base + OARR_LO(i));
+ writel(upper_32_bits(axi_addr), pcie->base + OARR_HI(i));
+ writel(lower_32_bits(pci_addr), pcie->base + OMAP_LO(i));
+ writel(upper_32_bits(pci_addr), pcie->base + OMAP_HI(i));
+
+ size -= ob->window_size;
+ if (size == 0)
+ break;
+
+ axi_addr += ob->window_size;
+ pci_addr += ob->window_size;
+ }
+
+ return 0;
+}
+
+static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
+ struct list_head *resources)
+{
+ struct resource_entry *window;
+ int ret;
+
+ resource_list_for_each_entry(window, resources) {
+ struct resource *res = window->res;
+ u64 res_type = resource_type(res);
+
+ switch (res_type) {
+ case IORESOURCE_IO:
+ case IORESOURCE_BUS:
+ break;
+ case IORESOURCE_MEM:
+ ret = iproc_pcie_setup_ob(pcie, res->start,
+ res->start - window->offset,
+ resource_size(res));
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(pcie->dev, "invalid resource %pR\n", res);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
int ret;
@@ -213,6 +342,14 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
iproc_pcie_reset(pcie);
+ if (pcie->need_ob_cfg) {
+ ret = iproc_pcie_map_ranges(pcie, res);
+ if (ret) {
+ dev_err(pcie->dev, "map failed\n");
+ goto err_power_off_phy;
+ }
+ }
+
#ifdef CONFIG_ARM
pcie->sysdata.private_data = pcie;
sysdata = &pcie->sysdata;
@@ -238,9 +375,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
-#ifdef CONFIG_ARM
pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
-#endif
pci_bus_add_devices(bus);
return 0;
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index c9e4c10a462e..d3dc940f773a 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -14,17 +14,30 @@
#ifndef _PCIE_IPROC_H
#define _PCIE_IPROC_H
-#define IPROC_PCIE_MAX_NUM_IRQS 6
+/**
+ * iProc PCIe outbound mapping
+ * @set_oarr_size: indicates the OARR size bit needs to be set
+ * @axi_offset: offset from the AXI address to the internal address used by
+ * the iProc PCIe core
+ * @window_size: outbound window size
+ */
+struct iproc_pcie_ob {
+ bool set_oarr_size;
+ resource_size_t axi_offset;
+ resource_size_t window_size;
+};
/**
* iProc PCIe device
* @dev: pointer to device data structure
* @base: PCIe host controller I/O register base
- * @resources: linked list of all PCI resources
* @sysdata: Per PCI controller data (ARM-specific)
* @root_bus: pointer to root bus
* @phy: optional PHY device that controls the Serdes
* @irqs: interrupt IDs
+ * @map_irq: function callback to map interrupts
+ * @need_ob_cfg: indicates SW needs to configure the outbound mapping window
+ * @ob: outbound mapping parameters
*/
struct iproc_pcie {
struct device *dev;
@@ -34,8 +47,9 @@ struct iproc_pcie {
#endif
struct pci_bus *root_bus;
struct phy *phy;
- int irqs[IPROC_PCIE_MAX_NUM_IRQS];
int (*map_irq)(const struct pci_dev *, u8, u8);
+ bool need_ob_cfg;
+ struct iproc_pcie_ob ob;
};
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 7678fe0820d7..f4fa6c537448 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -108,6 +108,8 @@
#define RCAR_PCI_MAX_RESOURCES 4
#define MAX_NR_INBOUND_MAPS 6
+static unsigned long global_io_offset;
+
struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
@@ -124,7 +126,16 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
}
/* Structure representing the PCIe interface */
+/*
+ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI
+ * sysdata. Add pci_sys_data as the first element in struct rcar_pcie so
+ * that when we use a rcar_pcie pointer as sysdata, it is also a pointer to
+ * a struct pci_sys_data.
+ */
struct rcar_pcie {
+#ifdef CONFIG_ARM
+ struct pci_sys_data sys;
+#endif
struct device *dev;
void __iomem *base;
struct resource res[RCAR_PCI_MAX_RESOURCES];
@@ -135,11 +146,6 @@ struct rcar_pcie {
struct rcar_msi msi;
};
-static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
unsigned long reg)
{
@@ -258,7 +264,7 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct rcar_pcie *pcie = bus->sysdata;
int ret;
ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
@@ -283,7 +289,7 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct rcar_pcie *pcie = bus->sysdata;
int shift, ret;
u32 data;
@@ -353,13 +359,12 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}
-static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
+static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pcie)
{
- struct rcar_pcie *pcie = sys_to_pcie(sys);
struct resource *res;
int i;
- pcie->root_bus_nr = -1;
+ pcie->root_bus_nr = pcie->busn.start;
/* Setup PCI resources */
for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
@@ -372,32 +377,53 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
if (res->flags & IORESOURCE_IO) {
phys_addr_t io_start = pci_pio_to_address(res->start);
- pci_ioremap_io(nr * SZ_64K, io_start);
- } else
- pci_add_resource(&sys->resources, res);
+ pci_ioremap_io(global_io_offset, io_start);
+ global_io_offset += SZ_64K;
+ }
+
+ pci_add_resource(resource, res);
}
- pci_add_resource(&sys->resources, &pcie->busn);
+ pci_add_resource(resource, &pcie->busn);
return 1;
}
-static struct hw_pci rcar_pci = {
- .setup = rcar_pcie_setup,
- .map_irq = of_irq_parse_and_map_pci,
- .ops = &rcar_pcie_ops,
-};
-
-static void rcar_pcie_enable(struct rcar_pcie *pcie)
+static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct pci_bus *bus, *child;
+ LIST_HEAD(res);
- rcar_pci.nr_controllers = 1;
- rcar_pci.private_data = (void **)&pcie;
-#ifdef CONFIG_PCI_MSI
- rcar_pci.msi_ctrl = &pcie->msi.chip;
-#endif
+ rcar_pcie_setup(&res, pcie);
+
+ /* Do not reassign resources if probe only */
+ if (!pci_has_flag(PCI_PROBE_ONLY))
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
- pci_common_init_dev(&pdev->dev, &rcar_pci);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
+ &rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
+ else
+ bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
+ &rcar_pcie_ops, pcie, &res);
+
+ if (!bus) {
+ dev_err(pcie->dev, "Scanning rootbus failed");
+ return -ENODEV;
+ }
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ pci_bus_add_devices(bus);
+
+ return 0;
}
static int phy_wait_for_ack(struct rcar_pcie *pcie)
@@ -970,9 +996,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
data = rcar_pci_read_reg(pcie, MACSR);
dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
- rcar_pcie_enable(pcie);
-
- return 0;
+ return rcar_pcie_enable(pcie);
}
static struct platform_driver rcar_pcie_driver = {
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 98d2683181bc..b95b7563c052 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -163,34 +163,34 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
* default value in capability register is 512 bytes. So force
* it to 128 here.
*/
- dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val);
+ dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
val &= ~PCI_EXP_DEVCTL_READRQ;
- dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val);
+ dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
- dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A);
- dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80);
+ dw_pcie_cfg_write(pp->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
+ dw_pcie_cfg_write(pp->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
/*
* if is_gen1 is set then handle it, so that some buggy card
* also works
*/
if (spear13xx_pcie->is_gen1) {
- dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4,
- &val);
+ dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
+ 4, &val);
if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
val &= ~((u32)PCI_EXP_LNKCAP_SLS);
val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
- PCI_EXP_LNKCAP, 4, val);
+ dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
+ PCI_EXP_LNKCAP, 4, val);
}
- dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4,
- &val);
+ dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
+ 2, &val);
if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
val &= ~((u32)PCI_EXP_LNKCAP_SLS);
val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
- PCI_EXP_LNKCTL2, 4, val);
+ dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
+ PCI_EXP_LNKCTL2, 2, val);
}
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index f3796124ad7c..4c8f4cde6854 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -204,36 +204,39 @@ static void pciehp_power_thread(struct work_struct *work)
kfree(info);
}
-void pciehp_queue_pushbutton_work(struct work_struct *work)
+static void pciehp_queue_power_work(struct slot *p_slot, int req)
{
- struct slot *p_slot = container_of(work, struct slot, work.work);
struct power_work_info *info;
+ p_slot->state = (req == ENABLE_REQ) ? POWERON_STATE : POWEROFF_STATE;
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
+ ctrl_err(p_slot->ctrl, "no memory to queue %s request\n",
+ (req == ENABLE_REQ) ? "poweron" : "poweroff");
return;
}
info->p_slot = p_slot;
INIT_WORK(&info->work, pciehp_power_thread);
+ info->req = req;
+ queue_work(p_slot->wq, &info->work);
+}
+
+void pciehp_queue_pushbutton_work(struct work_struct *work)
+{
+ struct slot *p_slot = container_of(work, struct slot, work.work);
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
- p_slot->state = POWEROFF_STATE;
- info->req = DISABLE_REQ;
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
break;
case BLINKINGON_STATE:
- p_slot->state = POWERON_STATE;
- info->req = ENABLE_REQ;
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
break;
default:
- kfree(info);
- goto out;
+ break;
}
- queue_work(p_slot->wq, &info->work);
- out:
mutex_unlock(&p_slot->lock);
}
@@ -301,27 +304,12 @@ static void handle_button_press_event(struct slot *p_slot)
static void handle_surprise_event(struct slot *p_slot)
{
u8 getstatus;
- struct power_work_info *info;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
- return;
- }
- info->p_slot = p_slot;
- INIT_WORK(&info->work, pciehp_power_thread);
pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- p_slot->state = POWEROFF_STATE;
- info->req = DISABLE_REQ;
- } else {
- p_slot->state = POWERON_STATE;
- info->req = ENABLE_REQ;
- }
-
- queue_work(p_slot->wq, &info->work);
+ if (!getstatus)
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
+ else
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
}
/*
@@ -330,17 +318,6 @@ static void handle_surprise_event(struct slot *p_slot)
static void handle_link_event(struct slot *p_slot, u32 event)
{
struct controller *ctrl = p_slot->ctrl;
- struct power_work_info *info;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
- return;
- }
- info->p_slot = p_slot;
- info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
- INIT_WORK(&info->work, pciehp_power_thread);
switch (p_slot->state) {
case BLINKINGON_STATE:
@@ -348,22 +325,19 @@ static void handle_link_event(struct slot *p_slot, u32 event)
cancel_delayed_work(&p_slot->work);
/* Fall through */
case STATIC_STATE:
- p_slot->state = event == INT_LINK_UP ?
- POWERON_STATE : POWEROFF_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, event == INT_LINK_UP ?
+ ENABLE_REQ : DISABLE_REQ);
break;
case POWERON_STATE:
if (event == INT_LINK_UP) {
ctrl_info(ctrl,
"Link Up event ignored on slot(%s): already powering on\n",
slot_name(p_slot));
- kfree(info);
} else {
ctrl_info(ctrl,
"Link Down event queued on slot(%s): currently getting powered on\n",
slot_name(p_slot));
- p_slot->state = POWEROFF_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
}
break;
case POWEROFF_STATE:
@@ -371,19 +345,16 @@ static void handle_link_event(struct slot *p_slot, u32 event)
ctrl_info(ctrl,
"Link Up event queued on slot(%s): currently getting powered off\n",
slot_name(p_slot));
- p_slot->state = POWERON_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
} else {
ctrl_info(ctrl,
"Link Down event ignored on slot(%s): already powering off\n",
slot_name(p_slot));
- kfree(info);
}
break;
default:
ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n",
p_slot->state, slot_name(p_slot));
- kfree(info);
break;
}
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ee0ebff103a4..31f31d460fc9 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -54,24 +54,29 @@ static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
* The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
* determine how many additional bus numbers will be consumed by VFs.
*
- * Iterate over all valid NumVFs and calculate the maximum number of bus
- * numbers that could ever be required.
+ * Iterate over all valid NumVFs, validate offset and stride, and calculate
+ * the maximum number of bus numbers that could ever be required.
*/
-static inline u8 virtfn_max_buses(struct pci_dev *dev)
+static int compute_max_vf_buses(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
- int nr_virtfn;
- u8 max = 0;
- int busnr;
+ int nr_virtfn, busnr, rc = 0;
- for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
+ for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
pci_iov_set_numvfs(dev, nr_virtfn);
+ if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
+ rc = -EIO;
+ goto out;
+ }
+
busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
- if (busnr > max)
- max = busnr;
+ if (busnr > iov->max_VF_buses)
+ iov->max_VF_buses = busnr;
}
- return max;
+out:
+ pci_iov_set_numvfs(dev, 0);
+ return rc;
}
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
@@ -222,21 +227,25 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
- return 0;
+ return 0;
+}
+
+int __weak pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ return 0;
}
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
- int i, j;
+ int i;
int nres;
- u16 offset, stride, initial;
+ u16 initial;
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
int bars = 0;
int bus;
- int retval;
if (!nr_virtfn)
return 0;
@@ -253,11 +262,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
- if (!offset || (nr_virtfn > 1 && !stride))
- return -EIO;
-
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
bars |= (1 << (i + PCI_IOV_RESOURCES));
@@ -270,9 +274,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
- iov->offset = offset;
- iov->stride = stride;
-
bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
if (bus > dev->bus->busn_res.end) {
dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
@@ -313,10 +314,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (nr_virtfn < initial)
initial = nr_virtfn;
- if ((retval = pcibios_sriov_enable(dev, initial))) {
- dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n",
- retval);
- return retval;
+ rc = pcibios_sriov_enable(dev, initial);
+ if (rc) {
+ dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n", rc);
+ goto err_pcibios;
}
for (i = 0; i < initial; i++) {
@@ -331,27 +332,24 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return 0;
failed:
- for (j = 0; j < i; j++)
- virtfn_remove(dev, j, 0);
+ while (i--)
+ virtfn_remove(dev, i, 0);
+ pcibios_sriov_disable(dev);
+err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- pci_iov_set_numvfs(dev, 0);
ssleep(1);
pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
+ pci_iov_set_numvfs(dev, 0);
return rc;
}
-int __weak pcibios_sriov_disable(struct pci_dev *pdev)
-{
- return 0;
-}
-
static void sriov_disable(struct pci_dev *dev)
{
int i;
@@ -384,7 +382,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
int rc;
int nres;
u32 pgsz;
- u16 ctrl, total, offset, stride;
+ u16 ctrl, total;
struct pci_sriov *iov;
struct resource *res;
struct pci_dev *pdev;
@@ -399,10 +397,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
ssleep(1);
}
- pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
- if (!total)
- return 0;
-
ctrl = 0;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev->is_physfn)
@@ -414,11 +408,10 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
- pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
- if (!offset || (total > 1 && !stride))
- return -EIO;
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
+ if (!total)
+ return 0;
pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
@@ -436,8 +429,15 @@ found:
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
- bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ /*
+ * If it is already FIXED, don't change it, something
+ * (perhaps EA or header fixups) wants it this way.
+ */
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
+ else
+ bar64 = __pci_read_base(dev, pci_bar_unknown, res,
+ pos + PCI_SRIOV_BAR + i * 4);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
@@ -456,8 +456,6 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
- iov->offset = offset;
- iov->stride = stride;
iov->pgsz = pgsz;
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
@@ -474,10 +472,15 @@ found:
dev->sriov = iov;
dev->is_physfn = 1;
- iov->max_VF_buses = virtfn_max_buses(dev);
+ rc = compute_max_vf_buses(dev);
+ if (rc)
+ goto fail_max_buses;
return 0;
+fail_max_buses:
+ dev->sriov = NULL;
+ dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d4497141d083..53e463244bb7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
#include "pci.h"
@@ -105,9 +106,12 @@ void __weak arch_teardown_msi_irq(unsigned int irq)
int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
+ struct msi_controller *chip = dev->bus->msi;
struct msi_desc *entry;
int ret;
+ if (chip && chip->setup_irqs)
+ return chip->setup_irqs(chip, dev, nvec, type);
/*
* If an architecture wants to support multiple MSI, it needs to
* override arch_setup_msi_irqs()
@@ -475,10 +479,11 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
int ret = -ENOMEM;
int num_msi = 0;
int count = 0;
+ int i;
/* Determine how many msi entries we have */
for_each_pci_msi_entry(entry, pdev)
- ++num_msi;
+ num_msi += entry->nvec_used;
if (!num_msi)
return 0;
@@ -487,19 +492,21 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
if (!msi_attrs)
return -ENOMEM;
for_each_pci_msi_entry(entry, pdev) {
- msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
- goto error_attrs;
- msi_attrs[count] = &msi_dev_attr->attr;
-
- sysfs_attr_init(&msi_dev_attr->attr);
- msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
- entry->irq);
- if (!msi_dev_attr->attr.name)
- goto error_attrs;
- msi_dev_attr->attr.mode = S_IRUGO;
- msi_dev_attr->show = msi_mode_show;
- ++count;
+ for (i = 0; i < entry->nvec_used; i++) {
+ msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+ if (!msi_dev_attr)
+ goto error_attrs;
+ msi_attrs[count] = &msi_dev_attr->attr;
+
+ sysfs_attr_init(&msi_dev_attr->attr);
+ msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+ entry->irq + i);
+ if (!msi_dev_attr->attr.name)
+ goto error_attrs;
+ msi_dev_attr->attr.mode = S_IRUGO;
+ msi_dev_attr->show = msi_mode_show;
+ ++count;
+ }
}
msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
@@ -1243,11 +1250,15 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
BUG_ON(!chip);
if (!chip->irq_write_msi_msg)
chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+ if (!chip->irq_mask)
+ chip->irq_mask = pci_msi_mask_irq;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = pci_msi_unmask_irq;
}
/**
- * pci_msi_create_irq_domain - Creat a MSI interrupt domain
- * @node: Optional device-tree node of the interrupt controller
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
@@ -1256,7 +1267,7 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
* Returns:
* A domain pointer or NULL in case of failure.
*/
-struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
@@ -1267,7 +1278,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
- domain = msi_create_irq_domain(node, info, parent);
+ domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
@@ -1303,14 +1314,14 @@ void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
/**
* pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
- * @node: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
* Returns: A domain pointer or NULL in case of failure. If successful
* the default PCI/MSI irqdomain pointer is updated.
*/
-struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info, struct irq_domain *parent)
{
struct irq_domain *domain;
@@ -1320,11 +1331,59 @@ struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
domain = NULL;
} else {
- domain = pci_msi_create_irq_domain(node, info, parent);
+ domain = pci_msi_create_irq_domain(fwnode, info, parent);
pci_msi_default_domain = domain;
}
mutex_unlock(&pci_msi_domain_lock);
return domain;
}
+
+static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+ u32 *pa = data;
+
+ *pa = alias;
+ return 0;
+}
+/**
+ * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+ * @domain: The interrupt domain
+ * @pdev: The PCI device.
+ *
+ * The RID for a device is formed from the alias, with a firmware
+ * supplied mapping applied.
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+{
+ struct device_node *of_node;
+ u32 rid = 0;
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+ of_node = irq_domain_get_of_node(domain);
+ if (of_node)
+ rid = of_msi_map_rid(&pdev->dev, of_node, rid);
+
+ return rid;
+}
+
+/**
+ * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+ * @pdev: The PCI device
+ *
+ * Use the firmware data to find a device-specific MSI domain
+ * (i.e. not one that is set as a default).
+ *
+ * Returns: The corresponding MSI domain or NULL if none has been found.
+ */
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+ u32 rid = 0;
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+ return of_msi_map_get_device_domain(&pdev->dev, rid);
+}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
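With multi-MSI, one msi_desc can cover several consecutive vectors, which is why the populate_msi_sysfs() hunk above now counts entry->nvec_used per descriptor and names one attribute per vector (entry->irq + i). A small standalone model of that counting, using invented descriptor values rather than real kernel structures:

#include <stdio.h>

struct fake_desc { unsigned int irq; unsigned int nvec_used; };

int main(void)
{
        struct fake_desc entries[] = { { 32, 4 }, { 40, 1 } };  /* made up */
        unsigned int e, i, count = 0;

        for (e = 0; e < 2; e++)
                for (i = 0; i < entries[e].nvec_used; i++) {
                        printf("attribute \"%u\"\n", entries[e].irq + i);
                        count++;
                }
        printf("%u attributes\n", count);       /* 5 attributes, not 2 */
        return 0;
}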
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 2e99a500cb83..e112da11630e 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include "pci.h"
@@ -64,27 +65,25 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
- struct device_node *np;
struct irq_domain *d;
if (!bus->dev.of_node)
return NULL;
/* Start looking for a phandle to an MSI controller. */
- np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0);
+ d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
+ if (d)
+ return d;
/*
* If we don't have an msi-parent property, look for a domain
* directly attached to the host bridge.
*/
- if (!np)
- np = bus->dev.of_node;
-
- d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
if (d)
return d;
- return irq_find_host(np);
+ return irq_find_host(bus->dev.of_node);
#else
return NULL;
#endif
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2ae03d..4446fcb5effd 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -172,7 +172,7 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
__u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields = 0;
- int retval = -ENODEV;
+	ssize_t retval = -ENODEV;
fields = sscanf(buf, "%x %x %x %x %x %x",
&vendor, &device, &subvendor, &subdevice,
@@ -190,15 +190,13 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
!((id->class ^ class) & class_mask)) {
list_del(&dynid->node);
kfree(dynid);
- retval = 0;
+ retval = count;
break;
}
}
spin_unlock(&pdrv->dynids.lock);
- if (retval)
- return retval;
- return count;
+ return retval;
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
@@ -299,9 +297,10 @@ static long local_pci_probe(void *_ddi)
* Unbound PCI devices are always put in D0, regardless of
* runtime PM status. During probe, the device is set to
* active and the usage count is incremented. If the driver
- * supports runtime PM, it should call pm_runtime_put_noidle()
- * in its probe routine and pm_runtime_get_noresume() in its
- * remove routine.
+ * supports runtime PM, it should call pm_runtime_put_noidle(),
+ * or any other runtime PM helper function decrementing the usage
+ * count, in its probe routine and pm_runtime_get_noresume() in
+ * its remove routine.
*/
pm_runtime_get_sync(dev);
pci_dev->driver = pci_drv;
@@ -683,10 +682,16 @@ static int pci_pm_prepare(struct device *dev)
return pci_dev_keep_suspended(to_pci_dev(dev));
}
+static void pci_pm_complete(struct device *dev)
+{
+ pci_dev_complete_resume(to_pci_dev(dev));
+ pm_complete_with_resume_check(dev);
+}
#else /* !CONFIG_PM_SLEEP */
#define pci_pm_prepare NULL
+#define pci_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
@@ -1217,6 +1222,7 @@ static int pci_pm_runtime_idle(struct device *dev)
static const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
+ .complete = pci_pm_complete,
.suspend = pci_pm_suspend,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 312f23a8429c..92618686604c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -216,7 +216,7 @@ static ssize_t numa_node_store(struct device *dev,
if (ret)
return ret;
- if (!node_online(node))
+ if (node >= MAX_NUMNODES || !node_online(node))
return -EINVAL;
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6a9a1116f1eb..314db8c1047a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -27,6 +27,7 @@
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
+#include <linux/aer.h>
#include "pci.h"
const char *pci_power_names[] = {
@@ -458,6 +459,30 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
EXPORT_SYMBOL(pci_find_parent_resource);
/**
+ * pci_find_pcie_root_port - return PCIe Root Port
+ * @dev: PCI device to query
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI Device.
+ */
+struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
+{
+ struct pci_dev *bridge, *highest_pcie_bridge = NULL;
+
+ bridge = pci_upstream_bridge(dev);
+ while (bridge && pci_is_pcie(bridge)) {
+ highest_pcie_bridge = bridge;
+ bridge = pci_upstream_bridge(bridge);
+ }
+
+	if (!highest_pcie_bridge ||
+	    pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
+		return NULL;
+
+ return highest_pcie_bridge;
+}
+EXPORT_SYMBOL(pci_find_pcie_root_port);
+
+/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -484,7 +509,7 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
}
/**
- * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
+ * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
* Restore the BAR values for a given device, so as to make it
@@ -494,6 +519,10 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
+ return;
+
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -1099,6 +1128,8 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
+ pci_cleanup_aer_error_status_regs(dev);
+
pci_restore_config_space(dev);
pci_restore_pcix_state(dev);
@@ -1710,15 +1741,7 @@ static void pci_pme_list_scan(struct work_struct *work)
mutex_unlock(&pci_pme_list_mutex);
}
-/**
- * pci_pme_active - enable or disable PCI device's PME# function
- * @dev: PCI device to handle.
- * @enable: 'true' to enable PME# generation; 'false' to disable it.
- *
- * The caller must verify that the device is capable of generating PME# before
- * calling this function with @enable equal to 'true'.
- */
-void pci_pme_active(struct pci_dev *dev, bool enable)
+static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
@@ -1732,6 +1755,19 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+}
+
+/**
+ * pci_pme_active - enable or disable PCI device's PME# function
+ * @dev: PCI device to handle.
+ * @enable: 'true' to enable PME# generation; 'false' to disable it.
+ *
+ * The caller must verify that the device is capable of generating PME# before
+ * calling this function with @enable equal to 'true'.
+ */
+void pci_pme_active(struct pci_dev *dev, bool enable)
+{
+ __pci_pme_active(dev, enable);
/*
* PCI (as opposed to PCIe) PME requires that the device have
@@ -2032,17 +2068,60 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake);
* reconfigured due to wakeup settings difference between system and runtime
* suspend and the current power state of it is suitable for the upcoming
* (system) transition.
+ *
+ * If the device is not configured for system wakeup, disable PME for it before
+ * returning 'true' to prevent it from waking up the system unnecessarily.
*/
bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
if (!pm_runtime_suspended(dev)
- || (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ || pci_target_state(pci_dev) != pci_dev->current_state
|| platform_pci_need_resume(pci_dev))
return false;
- return pci_target_state(pci_dev) == pci_dev->current_state;
+ /*
+ * At this point the device is good to go unless it's been configured
+ * to generate PME at the runtime suspend time, but it is not supposed
+ * to wake up the system. In that case, simply disable PME for it
+ * (it will have to be re-enabled on exit from system resume).
+ *
+ * If the device's power state is D3cold and the platform check above
+ * hasn't triggered, the device's configuration is suitable and we don't
+ * need to manipulate it at all.
+ */
+ spin_lock_irq(&dev->power.lock);
+
+ if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
+ !device_may_wakeup(dev))
+ __pci_pme_active(pci_dev, false);
+
+ spin_unlock_irq(&dev->power.lock);
+ return true;
+}
+
+/**
+ * pci_dev_complete_resume - Finalize resume from system sleep for a device.
+ * @pci_dev: Device to handle.
+ *
+ * If the device is runtime suspended and wakeup-capable, enable PME for it as
+ * it might have been disabled during the prepare phase of system suspend if
+ * the device was not configured for system wakeup.
+ */
+void pci_dev_complete_resume(struct pci_dev *pci_dev)
+{
+ struct device *dev = &pci_dev->dev;
+
+ if (!pci_dev_run_wake(pci_dev))
+ return;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
+ __pci_pme_active(pci_dev, true);
+
+ spin_unlock_irq(&dev->power.lock);
}
void pci_config_pm_runtime_get(struct pci_dev *pdev)
@@ -2148,6 +2227,198 @@ void pci_pm_init(struct pci_dev *dev)
}
}
+static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
+{
+ unsigned long flags = IORESOURCE_PCI_FIXED;
+
+ switch (prop) {
+ case PCI_EA_P_MEM:
+ case PCI_EA_P_VF_MEM:
+ flags |= IORESOURCE_MEM;
+ break;
+ case PCI_EA_P_MEM_PREFETCH:
+ case PCI_EA_P_VF_MEM_PREFETCH:
+ flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ break;
+ case PCI_EA_P_IO:
+ flags |= IORESOURCE_IO;
+ break;
+ default:
+ return 0;
+ }
+
+ return flags;
+}
+
+static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
+ u8 prop)
+{
+ if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
+ return &dev->resource[bei];
+#ifdef CONFIG_PCI_IOV
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
+ (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
+ return &dev->resource[PCI_IOV_RESOURCES +
+ bei - PCI_EA_BEI_VF_BAR0];
+#endif
+ else if (bei == PCI_EA_BEI_ROM)
+ return &dev->resource[PCI_ROM_RESOURCE];
+ else
+ return NULL;
+}
+
+/* Read an Enhanced Allocation (EA) entry */
+static int pci_ea_read(struct pci_dev *dev, int offset)
+{
+ struct resource *res;
+ int ent_size, ent_offset = offset;
+ resource_size_t start, end;
+ unsigned long flags;
+ u32 dw0, bei, base, max_offset;
+ u8 prop;
+ bool support_64 = (sizeof(resource_size_t) >= 8);
+
+ pci_read_config_dword(dev, ent_offset, &dw0);
+ ent_offset += 4;
+
+ /* Entry size field indicates DWORDs after 1st */
+ ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
+
+ if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
+ goto out;
+
+ bei = (dw0 & PCI_EA_BEI) >> 4;
+ prop = (dw0 & PCI_EA_PP) >> 8;
+
+ /*
+ * If the Property is in the reserved range, try the Secondary
+ * Property instead.
+ */
+ if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
+ prop = (dw0 & PCI_EA_SP) >> 16;
+ if (prop > PCI_EA_P_BRIDGE_IO)
+ goto out;
+
+ res = pci_ea_get_resource(dev, bei, prop);
+ if (!res) {
+ dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
+ goto out;
+ }
+
+ flags = pci_ea_flags(dev, prop);
+ if (!flags) {
+ dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
+ goto out;
+ }
+
+ /* Read Base */
+ pci_read_config_dword(dev, ent_offset, &base);
+ start = (base & PCI_EA_FIELD_MASK);
+ ent_offset += 4;
+
+ /* Read MaxOffset */
+ pci_read_config_dword(dev, ent_offset, &max_offset);
+ ent_offset += 4;
+
+ /* Read Base MSBs (if 64-bit entry) */
+ if (base & PCI_EA_IS_64) {
+ u32 base_upper;
+
+ pci_read_config_dword(dev, ent_offset, &base_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry starts above 32-bit boundary, can't use */
+ if (!support_64 && base_upper)
+ goto out;
+
+ if (support_64)
+ start |= ((u64)base_upper << 32);
+ }
+
+ end = start + (max_offset | 0x03);
+
+ /* Read MaxOffset MSBs (if 64-bit entry) */
+ if (max_offset & PCI_EA_IS_64) {
+ u32 max_offset_upper;
+
+ pci_read_config_dword(dev, ent_offset, &max_offset_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry too big, can't use */
+ if (!support_64 && max_offset_upper)
+ goto out;
+
+ if (support_64)
+ end += ((u64)max_offset_upper << 32);
+ }
+
+ if (end < start) {
+ dev_err(&dev->dev, "EA Entry crosses address boundary\n");
+ goto out;
+ }
+
+ if (ent_size != ent_offset - offset) {
+ dev_err(&dev->dev,
+ "EA Entry Size (%d) does not match length read (%d)\n",
+ ent_size, ent_offset - offset);
+ goto out;
+ }
+
+ res->name = pci_name(dev);
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+
+ if (bei <= PCI_EA_BEI_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+ else if (bei == PCI_EA_BEI_ROM)
+ dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+ res, prop);
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei - PCI_EA_BEI_VF_BAR0, res, prop);
+ else
+ dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+
+out:
+ return offset + ent_size;
+}
+
+/* Enhanced Allocation Initialization */
+void pci_ea_init(struct pci_dev *dev)
+{
+ int ea;
+ u8 num_ent;
+ int offset;
+ int i;
+
+ /* find PCI EA capability in list */
+ ea = pci_find_capability(dev, PCI_CAP_ID_EA);
+ if (!ea)
+ return;
+
+ /* determine the number of entries */
+ pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
+ &num_ent);
+ num_ent &= PCI_EA_NUM_ENT_MASK;
+
+ offset = ea + PCI_EA_FIRST_ENT;
+
+ /* Skip DWORD 2 for type 1 functions */
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ offset += 4;
+
+ /* parse each EA entry */
+ for (i = 0; i < num_ent; ++i)
+ offset = pci_ea_read(dev, offset);
+}
+
static void pci_add_saved_cap(struct pci_dev *pci_dev,
struct pci_cap_saved_state *new_cap)
{
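In pci_ea_read() above, the Base and MaxOffset registers carry flag bits in their low two bits (Base is masked with PCI_EA_FIELD_MASK, MaxOffset is OR-ed with 0x03, and PCI_EA_IS_64 is tested on both), so the decoded range ends at start + (max_offset | 0x03). A standalone arithmetic check of that decoding, with invented register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t start = 0xfe000000;            /* Base with flag bits masked off */
        uint64_t max_offset = 0x000ffffc;       /* low two bits are flags */
        uint64_t end = start + (max_offset | 0x03);

        /* prints: res 0xfe000000-0xfe0fffff, 1048576 bytes */
        printf("res %#llx-%#llx, %llu bytes\n",
               (unsigned long long)start, (unsigned long long)end,
               (unsigned long long)(end - start + 1));
        return 0;
}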
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 24ba9dc8910a..fd2f03fa53f3 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -75,9 +75,11 @@ void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
bool pci_dev_keep_suspended(struct pci_dev *dev);
+void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
+void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 9803e3d039fe..fba785e9df75 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -74,6 +74,34 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+ int port_type;
+
+ if (!pci_is_pcie(dev))
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -EIO;
+
+ port_type = pci_pcie_type(dev);
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
+ }
+
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+
+ return 0;
+}
+
/**
* add_error_device - list device to be handled
* @e_info: pointer to error info
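pci_cleanup_aer_error_status_regs() above clears whatever is latched by reading each AER status register and writing the same value back; those status bits are write-1-to-clear, so only the bits that were set get cleared. A minimal model of that RW1C behaviour (the register and helpers are invented for illustration):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_status = 0x00000011;       /* two latched error bits */

static uint32_t read_status(void)    { return fake_status; }
static void write_status(uint32_t v) { fake_status &= ~v; }     /* RW1C */

int main(void)
{
        uint32_t status = read_status();        /* pci_read_config_dword() analogue */

        write_status(status);                   /* pci_write_config_dword() analogue */
        printf("after clear: %#x\n", fake_status);      /* 0 */
        return 0;
}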
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8361d27e5eca..edb1984201e9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -6,12 +6,15 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
+#include <linux/aer.h>
+#include <linux/acpi.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"
@@ -1597,6 +1600,9 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
static void pci_init_capabilities(struct pci_dev *dev)
{
+ /* Enhanced Allocation */
+ pci_ea_init(dev);
+
/* MSI/MSI-X list */
pci_msi_init_pci_dev(dev);
@@ -1620,17 +1626,80 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
+
+ pci_cleanup_aer_error_status_regs(dev);
+}
+
+/*
+ * This is the equivalent of pci_host_bridge_msi_domain that acts on
+ * devices. Firmware interfaces that can select the MSI domain on a
+ * per-device basis should be called from here.
+ */
+static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
+{
+ struct irq_domain *d;
+
+ /*
+ * If a domain has been set through the pcibios_add_device
+ * callback, then this is the one (platform code knows best).
+ */
+ d = dev_get_msi_domain(&dev->dev);
+ if (d)
+ return d;
+
+ /*
+ * Let's see if we have a firmware interface able to provide
+ * the domain.
+ */
+ d = pci_msi_get_device_domain(dev);
+ if (d)
+ return d;
+
+ return NULL;
}
static void pci_set_msi_domain(struct pci_dev *dev)
{
+ struct irq_domain *d;
+
/*
- * If no domain has been set through the pcibios_add_device
- * callback, inherit the default from the bus device.
+ * If the platform or firmware interfaces cannot supply a
+ * device-specific MSI domain, then inherit the default domain
+ * from the host bridge itself.
*/
- if (!dev_get_msi_domain(&dev->dev))
- dev_set_msi_domain(&dev->dev,
- dev_get_msi_domain(&dev->bus->dev));
+ d = pci_dev_msi_domain(dev);
+ if (!d)
+ d = dev_get_msi_domain(&dev->bus->dev);
+
+ dev_set_msi_domain(&dev->dev, d);
+}
+
+/**
+ * pci_dma_configure - Setup DMA configuration
+ * @dev: ptr to pci_dev struct of the PCI device
+ *
+ * Function to update PCI device's DMA configuration using the same
+ * info from the OF node or ACPI node of host bridge's parent (if any).
+ */
+static void pci_dma_configure(struct pci_dev *dev)
+{
+ struct device *bridge = pci_get_host_bridge_device(dev);
+
+ if (IS_ENABLED(CONFIG_OF) &&
+ bridge->parent && bridge->parent->of_node) {
+ of_dma_configure(&dev->dev, bridge->parent->of_node);
+ } else if (has_acpi_companion(bridge)) {
+ struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
+ enum dev_dma_attr attr = acpi_get_dma_attr(adev);
+
+ if (attr == DEV_DMA_NOT_SUPPORTED)
+ dev_warn(&dev->dev, "DMA not supported.\n");
+ else
+ arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
+ attr == DEV_DMA_COHERENT);
+ }
+
+ pci_put_host_bridge_device(bridge);
}
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1646,7 +1715,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- of_pci_dma_configure(dev);
+ pci_dma_configure(dev);
pci_set_dma_max_seg_size(dev, 65536);
pci_set_dma_seg_boundary(dev, 0xffffffff);
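pci_set_msi_domain() above now applies a clear precedence: a domain already assigned by platform code (e.g. via pcibios_add_device), then one described by firmware through pci_msi_get_device_domain(), and only then the host bridge's default. A toy model of that fallback order, with strings standing in for struct irq_domain pointers:

#include <stdio.h>

static const char *pick_domain(const char *platform, const char *firmware,
                               const char *bridge_default)
{
        if (platform)
                return platform;
        if (firmware)
                return firmware;
        return bridge_default;
}

int main(void)
{
        printf("%s\n", pick_domain(NULL, "firmware-described domain",
                                   "bridge default domain"));
        return 0;
}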
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 78a70fb26d80..c2dd52ea4198 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2246,6 +2246,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
@@ -3710,6 +3711,63 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
/*
+ * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
+ * values for the Attribute as were supplied in the header of the
+ * corresponding Request, except as explicitly allowed when IDO is used."
+ *
+ * If a non-compliant device generates a completion with a different
+ * attribute than the request, the receiver may accept it (which itself
+ * seems non-compliant based on sec 2.3.2), or it may handle it as a
+ * Malformed TLP or an Unexpected Completion, which will probably lead to a
+ * device access timeout.
+ *
+ * If the non-compliant device generates completions with zero attributes
+ * (instead of copying the attributes from the request), we can work around
+ * this by disabling the "Relaxed Ordering" and "No Snoop" attributes in
+ * upstream devices so they always generate requests with zero attributes.
+ *
+ * This affects other devices under the same Root Port, but since these
+ * attributes are performance hints, there should be no functional problem.
+ *
+ * Note that Configuration Space accesses are never supposed to have TLP
+ * Attributes, so we're safe waiting till after any Configuration Space
+ * accesses to do the Root Port fixup.
+ */
+static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
+{
+ struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
+
+ if (!root_port) {
+ dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n");
+ return;
+ }
+
+ dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
+ dev_name(&pdev->dev));
+ pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+}
+
+/*
+ * The Chelsio T5 chip fails to copy TLP Attributes from a Request to the
+ * Completion it generates.
+ */
+static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
+{
+ /*
+ * This mask/compare operation selects for Physical Function 4 on a
+ * T5. We only need to fix up the Root Port once for any of the
+ * PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
+	 * 0x54xx so we use that one.
+ */
+ if ((pdev->device & 0xff00) == 0x5400)
+ quirk_disable_root_port_attributes(pdev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_T5_disable_root_port_attributes);
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
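The Chelsio fixup above keys on the T5 PF4 device-ID range with a mask compare rather than listing every ID. Running a few invented IDs through the same test shows which ones would trigger the Root Port fixup:

#include <stdio.h>

int main(void)
{
        unsigned int ids[] = { 0x5401, 0x5011, 0x5490 };        /* illustrative values */
        int i;

        for (i = 0; i < 3; i++)
                printf("0x%04x -> %s\n", ids[i],
                       (ids[i] & 0xff00) == 0x5400 ? "fix up Root Port" : "skip");
        return 0;
}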
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 508cc56130e3..1723ac1b30e1 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1037,9 +1037,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct resource *r = &dev->resource[i];
resource_size_t r_size;
- if (r->parent || ((r->flags & mask) != type &&
- (r->flags & mask) != type2 &&
- (r->flags & mask) != type3))
+ if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
+ ((r->flags & mask) != type &&
+ (r->flags & mask) != type2 &&
+ (r->flags & mask) != type3))
continue;
r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
@@ -1340,6 +1341,47 @@ void pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
+static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
+{
+ int i;
+ struct resource *parent_r;
+ unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH;
+
+ pci_bus_for_each_resource(b, parent_r, i) {
+ if (!parent_r)
+ continue;
+
+ if ((r->flags & mask) == (parent_r->flags & mask) &&
+ resource_contains(parent_r, r))
+ request_resource(parent_r, r);
+ }
+}
+
+/*
+ * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they
+ * are skipped by pbus_assign_resources_sorted().
+ */
+static void pdev_assign_fixed_resources(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct pci_bus *b;
+ struct resource *r = &dev->resource[i];
+
+ if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
+ !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+
+ b = dev->bus;
+ while (b && !r->parent) {
+ assign_fixed_resource_on_bus(b, r);
+ b = b->parent;
+ }
+ }
+}
+
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -1350,6 +1392,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
pbus_assign_resources_sorted(bus, realloc_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
+ pdev_assign_fixed_resources(dev);
+
b = dev->subordinate;
if (!b)
continue;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 232f9254c11a..604011e047d6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -36,6 +36,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
enum pci_bar_type type;
struct resource *res = dev->resource + resno;
+ if (dev->is_virtfn) {
+ dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ return;
+ }
+
/*
* Ignore resources for unimplemented BARs and unused resource slots
* for 64 bit BARs.
@@ -177,6 +182,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
end = res->end;
res->start = fw_addr;
res->end = res->start + size - 1;
+ res->flags &= ~IORESOURCE_UNSET;
root = pci_find_parent_resource(dev, res);
if (!root) {
@@ -194,6 +200,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
resno, res, conflict->name, conflict);
res->start = start;
res->end = end;
+ res->flags |= IORESOURCE_UNSET;
return -EBUSY;
}
return 0;