Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/vga/Kconfig | 19
-rw-r--r--  drivers/gpu/vga/Makefile | 1
-rw-r--r--  drivers/pci/Kconfig | 19
-rw-r--r--  drivers/pci/Makefile | 1
-rw-r--r--  drivers/pci/access.c | 9
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 19
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 7
-rw-r--r--  drivers/pci/controller/dwc/pcie-fu740.c | 57
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c | 3
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 2
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 2
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 392
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 233
-rw-r--r--  drivers/pci/controller/pci-loongson.c | 2
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 2
-rw-r--r--  drivers/pci/controller/pci-tegra.c | 2
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 1
-rw-r--r--  drivers/pci/controller/pcie-iproc-bcma.c | 2
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 11
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 2
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 2
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 2
-rw-r--r--  drivers/pci/controller/pcie-rockchip.h | 1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 14
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 7
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 5
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 22
-rw-r--r--  drivers/pci/hotplug/cpqphp_pci.c | 2
-rw-r--r--  drivers/pci/hotplug/ibmphp_hpc.c | 2
-rw-r--r--  drivers/pci/hotplug/ibmphp_res.c | 3
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 4
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 2
-rw-r--r--  drivers/pci/p2pdma.c | 1
-rw-r--r--  drivers/pci/pci-acpi.c | 6
-rw-r--r--  drivers/pci/pci-bridge-emul.c | 8
-rw-r--r--  drivers/pci/pci-sysfs.c | 7
-rw-r--r--  drivers/pci/pcie/Kconfig | 2
-rw-r--r--  drivers/pci/pcie/aer_inject.c | 2
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 4
-rw-r--r--  drivers/pci/proc.c | 6
-rw-r--r--  drivers/pci/quirks.c | 12
-rw-r--r--  drivers/pci/setup-bus.c | 4
-rw-r--r--  drivers/pci/vgaarb.c (renamed from drivers/gpu/vga/vgaarb.c) | 313
46 files changed, 731 insertions(+), 512 deletions(-)
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 1ad4c4ef0b5e..eb8b14ab22c3 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,23 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config VGA_ARB
- bool "VGA Arbitration" if EXPERT
- default y
- depends on (PCI && !S390)
- help
- Some "legacy" VGA devices implemented on PCI typically have the same
- hard-decoded addresses as they did on ISA. When multiple PCI devices
- are accessed at same time they need some kind of coordination. Please
- see Documentation/gpu/vgaarbiter.rst for more details. Select this to
- enable VGA arbiter.
-
-config VGA_ARB_MAX_GPUS
- int "Maximum number of GPUs"
- default 16
- depends on VGA_ARB
- help
- Reserves space in the kernel to maintain resource locking for
- multiple GPUS. The overhead for each GPU is very small.
-
config VGA_SWITCHEROO
bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
index e92064442d60..9800620deda3 100644
--- a/drivers/gpu/vga/Makefile
+++ b/drivers/gpu/vga/Makefile
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index d98fafdd0f99..133c73207782 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -252,6 +252,25 @@ config PCIE_BUS_PEER2PEER
endchoice
+config VGA_ARB
+ bool "VGA Arbitration" if EXPERT
+ default y
+ depends on (PCI && !S390)
+ help
+ Some "legacy" VGA devices implemented on PCI typically have the same
+ hard-decoded addresses as they did on ISA. When multiple PCI devices
+ are accessed at same time they need some kind of coordination. Please
+ see Documentation/gpu/vgaarbiter.rst for more details. Select this to
+ enable VGA arbiter.
+
+config VGA_ARB_MAX_GPUS
+ int "Maximum number of GPUs"
+ default 16
+ depends on VGA_ARB
+ help
+ Reserves space in the kernel to maintain resource locking for
+ multiple GPUS. The overhead for each GPU is very small.
+
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 37be95adf169..0da6b1ebc694 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
+obj-$(CONFIG_VGA_ARB) += vgaarb.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0d9f6b21babb..708c7529647f 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -159,9 +159,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
* write happen to have any RW1C (write-one-to-clear) bits set, we
* just inadvertently cleared something we shouldn't have.
*/
- dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+ if (!bus->unsafe_warn) {
+ dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+ size, pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+ bus->unsafe_warn = 1;
+ }
mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
tmp = readl(addr) & mask;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6974bd5aa116..6619e3caffe2 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -453,10 +453,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX7D:
break;
case IMX8MM:
- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
- if (ret)
- dev_err(dev, "unable to enable pcie_aux clock\n");
- break;
case IMX8MQ:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
@@ -809,9 +805,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_reset_phy;
+ dw_pcie_wait_for_link(pci);
if (pci->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
@@ -847,11 +841,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
}
/* Make sure link training is finished as well! */
- ret = dw_pcie_wait_for_link(pci);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
+ dw_pcie_wait_for_link(pci);
} else {
dev_info(dev, "Link: Gen2 disabled\n");
}
@@ -923,6 +913,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
/* Others poke directly at IOMUXC registers */
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
+ case IMX6QP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF,
IMX6SX_GPR12_PCIE_PM_TURN_OFF);
@@ -983,6 +974,7 @@ static int imx6_pcie_suspend_noirq(struct device *dev)
case IMX8MM:
if (phy_power_off(imx6_pcie->phy))
dev_err(dev, "unable to power off PHY\n");
+ phy_exit(imx6_pcie->phy);
break;
default:
break;
@@ -1252,7 +1244,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6QP] = {
.variant = IMX6QP,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
},
[IMX7D] = {
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 1c2ee4e13f1c..d10e5fd0f83c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -531,13 +531,13 @@ static void ks_pcie_quirk(struct pci_dev *dev)
struct pci_dev *bridge;
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 686ded034f22..f44bf347904a 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -313,14 +313,14 @@ static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
* cannot program the PCI_CLASS_DEVICE register, so we must fabricate
* the return value in the config accessors.
*/
- if (where == PCI_CLASS_REVISION && size == 4)
- *val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
- else if (where == PCI_CLASS_DEVICE && size == 2)
- *val = PCI_CLASS_BRIDGE_PCI;
- else if (where == PCI_CLASS_DEVICE && size == 1)
- *val = PCI_CLASS_BRIDGE_PCI & 0xff;
- else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
- *val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+ if ((where & ~3) == PCI_CLASS_REVISION) {
+ if (size <= 2)
+ *val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
+ *val &= ~0xffffff00;
+ *val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+ }
return PCIBIOS_SUCCESSFUL;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index f4755f3a03be..2fa86f32d964 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -362,6 +362,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret < 0)
return ret;
} else if (pp->has_msi_ctrl) {
+ u32 ctrl, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
if (!pp->msi_irq) {
pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
if (pp->msi_irq < 0) {
@@ -541,7 +547,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
pp->irq_mask[ctrl]);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 00cde9a248b5..02cc70d8cc06 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -181,10 +181,59 @@ static int fu740_pcie_start_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
struct fu740_pcie *afp = dev_get_drvdata(dev);
+ u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int ret;
+ u32 orig, tmp;
+
+ /*
+ * Force 2.5GT/s when starting the link, due to some devices not
+ * probing at higher speeds. This happens with the PCIe switch
+ * on the Unmatched board when U-Boot has not initialised the PCIe.
+ * The fix in U-Boot is to force 2.5GT/s, which then gets cleared
+ * by the soft reset done by this driver.
+ */
+ dev_dbg(dev, "cap_exp at %x\n", cap_exp);
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ orig = tmp & PCI_EXP_LNKCAP_SLS;
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
/* Enable LTSSM */
writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
- return 0;
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start\n");
+ goto err;
+ }
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) {
+ dev_dbg(dev, "changing speed back to original\n");
+
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= orig;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start at new speed\n");
+ goto err;
+ }
+ }
+
+ ret = 0;
+err:
+ WARN_ON(ret); /* we assume that errors will be very rare */
+ dw_pcie_dbi_ro_wr_dis(pci);
+ return ret;
}
static int fu740_pcie_host_init(struct pcie_port *pp)
@@ -224,7 +273,7 @@ static int fu740_pcie_host_init(struct pcie_port *pp)
/* Clear hold_phy_rst */
writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
/* Enable pcieauxclk */
- ret = clk_prepare_enable(afp->pcie_aux);
+ clk_prepare_enable(afp->pcie_aux);
/* Set RC mode */
writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
@@ -259,11 +308,11 @@ static int fu740_pcie_probe(struct platform_device *pdev)
return PTR_ERR(afp->mgmt_base);
/* Fetch GPIOs */
- afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW);
+ afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(afp->reset))
return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
- afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW);
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW);
if (IS_ERR(afp->pwren))
return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index fa6886d66488..5b56cedebdf1 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -332,9 +332,6 @@ static int hi3660_pcie_phy_init(struct platform_device *pdev,
pcie->phy_priv = phy;
phy->dev = dev;
- /* registers */
- pdev = container_of(dev, struct platform_device, dev);
-
ret = hi3660_pcie_phy_get_clk(phy);
if (ret)
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index c19cd506ed3f..a47f1c0434c2 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1634,7 +1634,7 @@ static const struct of_device_id qcom_pcie_match[] = {
static void qcom_fixup_class(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index f3547aa60140..31a7bdebe540 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -295,7 +295,7 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
/* fixup for PCIe class register */
value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value &= 0xff;
- value |= (PCI_CLASS_BRIDGE_PCI << 16);
+ value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
return 0;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 87c36e12c246..401fb5eb7645 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -38,10 +38,6 @@
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
-#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
-#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
-#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
-#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
@@ -102,6 +98,10 @@
#define PCIE_MSG_PM_PME_MASK BIT(7)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
+#define PCIE_ISR0_CORR_ERR BIT(11)
+#define PCIE_ISR0_NFAT_ERR BIT(12)
+#define PCIE_ISR0_FAT_ERR BIT(13)
+#define PCIE_ISR0_ERR_MASK GENMASK(13, 11)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
@@ -272,17 +272,16 @@ struct advk_pcie {
u32 actions;
} wins[OB_WIN_COUNT];
u8 wins_count;
+ int irq;
+ struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
- struct irq_chip msi_bottom_irq_chip;
- struct irq_chip msi_irq_chip;
- struct msi_domain_info msi_domain_info;
+ raw_spinlock_t msi_irq_lock;
DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
struct mutex msi_used_lock;
- u16 msi_msg;
int link_gen;
struct pci_bridge_emul bridge;
struct gpio_desc *reset_gpio;
@@ -477,6 +476,7 @@ static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
+ phys_addr_t msi_addr;
u32 reg;
int i;
@@ -529,7 +529,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
*/
reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
reg &= ~0xffffff00;
- reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+ reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
/* Disable Root Bridge I/O space, memory space and bus mastering */
@@ -565,6 +565,11 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= LANE_COUNT_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+ /* Set MSI address */
+ msi_addr = virt_to_phys(pcie);
+ advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
+
/* Enable MSI */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
@@ -576,15 +581,20 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
- /* Disable All ISR0/1 Sources */
- reg = PCIE_ISR0_ALL_MASK;
+ /* Disable All ISR0/1 and MSI Sources */
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+
+ /* Unmask summary MSI interrupt */
+ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
reg &= ~PCIE_ISR0_MSI_INT_PENDING;
advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
- advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
-
- /* Unmask all MSIs */
- advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+ /* Unmask PME interrupt for processing of PME requester */
+ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ reg &= ~PCIE_MSG_PM_PME_MASK;
+ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
@@ -778,11 +788,15 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
case PCI_INTERRUPT_LINE: {
/*
* From the whole 32bit register we support reading from HW only
- * one bit: PCI_BRIDGE_CTL_BUS_RESET.
+ * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
* Other bits are retrieved only from emulated config buffer.
*/
__le32 *cfgspace = (__le32 *)&bridge->conf;
u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+ if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
+ val &= ~(PCI_BRIDGE_CTL_SERR << 16);
+ else
+ val |= PCI_BRIDGE_CTL_SERR << 16;
if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
else
@@ -808,6 +822,19 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
break;
case PCI_INTERRUPT_LINE:
+ /*
+ * According to Figure 6-3: Pseudo Logic Diagram for Error
+ * Message Controls in PCIe base specification, SERR# Enable bit
+ * in Bridge Control register enable receiving of ERR_* messages
+ */
+ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ if (new & (PCI_BRIDGE_CTL_SERR << 16))
+ val &= ~PCIE_ISR0_ERR_MASK;
+ else
+ val |= PCIE_ISR0_ERR_MASK;
+ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+ }
if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
@@ -835,20 +862,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
*value = PCI_EXP_SLTSTA_PDS << 16;
return PCI_BRIDGE_EMUL_HANDLED;
- case PCI_EXP_RTCTL: {
- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
- *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
- *value |= PCI_EXP_RTCAP_CRSVIS << 16;
- return PCI_BRIDGE_EMUL_HANDLED;
- }
-
- case PCI_EXP_RTSTA: {
- u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
- u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
- *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
- return PCI_BRIDGE_EMUL_HANDLED;
- }
+ /*
+ * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
+ * to be handled here, because their values are stored in emulated
+ * config space buffer, and we read them from there when needed.
+ */
case PCI_EXP_LNKCAP: {
u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
@@ -903,19 +921,18 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
break;
case PCI_EXP_RTCTL: {
- /* Only mask/unmask PME interrupt */
- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
- ~PCIE_MSG_PM_PME_MASK;
- if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
- val |= PCIE_MSG_PM_PME_MASK;
- advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+ u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
+ /* Only emulation of PMEIE and CRSSVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
- case PCI_EXP_RTSTA:
- new = (new & PCI_EXP_RTSTA_PME) >> 9;
- advk_writel(pcie, new, PCIE_ISR0_REG);
- break;
+ /*
+ * PCI_EXP_RTSTA is also supported, but does not need to be handled
+ * here, because its value is stored in emulated config space buffer,
+ * and we write it there when needed.
+ */
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVCTL2:
@@ -959,7 +976,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
/* Support interrupt A for MSI feature */
- bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+ bridge->conf.intpin = PCI_INTERRUPT_INTA;
/* Aardvark HW provides PCIe Capability structure in version 2 */
bridge->pcie_conf.cap = cpu_to_le16(2);
@@ -981,8 +998,12 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
return false;
/*
- * If the link goes down after we check for link-up, nothing bad
- * happens but the config access times out.
+ * If the link goes down after we check for link-up, we have a problem:
+ * if a PIO request is executed while link-down, the whole controller
+ * gets stuck in a non-functional state, and even after link comes up
+ * again, PIO requests won't work anymore, and a reset of the whole PCIe
+ * controller is needed. Therefore we need to prevent sending PIO
+ * requests while the link is down.
*/
if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
return false;
@@ -1180,11 +1201,11 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
struct msi_msg *msg)
{
struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
- phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
+ phys_addr_t msi_addr = virt_to_phys(pcie);
- msg->address_lo = lower_32_bits(msi_msg);
- msg->address_hi = upper_32_bits(msi_msg);
- msg->data = data->irq;
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
}
static int advk_msi_set_affinity(struct irq_data *irq_data,
@@ -1193,6 +1214,54 @@ static int advk_msi_set_affinity(struct irq_data *irq_data,
return -EINVAL;
}
+static void advk_msi_irq_mask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ mask |= BIT(hwirq);
+ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_irq_unmask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ mask &= ~BIT(hwirq);
+ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void advk_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip advk_msi_bottom_irq_chip = {
+ .name = "MSI",
+ .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
+ .irq_set_affinity = advk_msi_set_affinity,
+ .irq_mask = advk_msi_irq_mask,
+ .irq_unmask = advk_msi_irq_unmask,
+};
+
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs, void *args)
@@ -1201,19 +1270,15 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
int hwirq, i;
mutex_lock(&pcie->msi_used_lock);
- hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
- 0, nr_irqs, 0);
- if (hwirq >= MSI_IRQ_NUM) {
- mutex_unlock(&pcie->msi_used_lock);
- return -ENOSPC;
- }
-
- bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+ hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
+ order_base_2(nr_irqs));
mutex_unlock(&pcie->msi_used_lock);
+ if (hwirq < 0)
+ return -ENOSPC;
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, hwirq + i,
- &pcie->msi_bottom_irq_chip,
+ &advk_msi_bottom_irq_chip,
domain->host_data, handle_simple_irq,
NULL, NULL);
@@ -1227,7 +1292,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain,
struct advk_pcie *pcie = domain->host_data;
mutex_lock(&pcie->msi_used_lock);
- bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+ bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
mutex_unlock(&pcie->msi_used_lock);
}
@@ -1269,7 +1334,6 @@ static int advk_pcie_irq_map(struct irq_domain *h,
{
struct advk_pcie *pcie = h->host_data;
- advk_pcie_irq_mask(irq_get_irq_data(virq));
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &pcie->irq_chip,
handle_level_irq);
@@ -1283,37 +1347,25 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
+static struct irq_chip advk_msi_irq_chip = {
+ .name = "advk-MSI",
+ .irq_mask = advk_msi_top_irq_mask,
+ .irq_unmask = advk_msi_top_irq_unmask,
+};
+
+static struct msi_domain_info advk_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ .chip = &advk_msi_irq_chip,
+};
+
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
- struct irq_chip *bottom_ic, *msi_ic;
- struct msi_domain_info *msi_di;
- phys_addr_t msi_msg_phys;
+ raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- bottom_ic = &pcie->msi_bottom_irq_chip;
-
- bottom_ic->name = "MSI";
- bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
- bottom_ic->irq_set_affinity = advk_msi_set_affinity;
-
- msi_ic = &pcie->msi_irq_chip;
- msi_ic->name = "advk-MSI";
-
- msi_di = &pcie->msi_domain_info;
- msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI;
- msi_di->chip = msi_ic;
-
- msi_msg_phys = virt_to_phys(&pcie->msi_msg);
-
- advk_writel(pcie, lower_32_bits(msi_msg_phys),
- PCIE_MSI_ADDR_LOW_REG);
- advk_writel(pcie, upper_32_bits(msi_msg_phys),
- PCIE_MSI_ADDR_HIGH_REG);
-
pcie->msi_inner_domain =
irq_domain_add_linear(NULL, MSI_IRQ_NUM,
&advk_msi_domain_ops, pcie);
@@ -1321,8 +1373,9 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
return -ENOMEM;
pcie->msi_domain =
- pci_msi_create_irq_domain(of_node_to_fwnode(node),
- msi_di, pcie->msi_inner_domain);
+ pci_msi_create_irq_domain(dev_fwnode(dev),
+ &advk_msi_domain_info,
+ pcie->msi_inner_domain);
if (!pcie->msi_domain) {
irq_domain_remove(pcie->msi_inner_domain);
return -ENOMEM;
@@ -1363,7 +1416,6 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
}
irq_chip->irq_mask = advk_pcie_irq_mask;
- irq_chip->irq_mask_ack = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
pcie->irq_domain =
@@ -1385,10 +1437,73 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
irq_domain_remove(pcie->irq_domain);
}
+static struct irq_chip advk_rp_irq_chip = {
+ .name = "advk-RP",
+};
+
+static int advk_pcie_rp_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct advk_pcie *pcie = h->host_data;
+
+ irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
+ irq_set_chip_data(virq, pcie);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
+ .map = advk_pcie_rp_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
+{
+ pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
+ &advk_pcie_rp_irq_domain_ops,
+ pcie);
+ if (!pcie->rp_irq_domain) {
+ dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
+{
+ irq_domain_remove(pcie->rp_irq_domain);
+}
+
+static void advk_pcie_handle_pme(struct advk_pcie *pcie)
+{
+ u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;
+
+ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);
+
+ /*
+ * PCIE_MSG_LOG_REG contains the last inbound message, so store
+ * the requester ID only when PME was not asserted yet.
+ * Also do not trigger PME interrupt when PME is still asserted.
+ */
+ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
+ pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);
+
+ /*
+ * Trigger PME interrupt only if PMEIE bit in Root Control is set.
+ * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
+ */
+ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
+ return;
+
+ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
+ }
+}
+
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
u32 msi_val, msi_mask, msi_status, msi_idx;
- u16 msi_data;
msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
@@ -1398,13 +1513,9 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
if (!(BIT(msi_idx) & msi_status))
continue;
- /*
- * msi_idx contains bits [4:0] of the msi_data and msi_data
- * contains 16bit MSI interrupt number
- */
advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
- generic_handle_irq(msi_data);
+ if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
}
advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
@@ -1425,6 +1536,22 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
+ /* Process PME interrupt as the first one to do not miss PME requester id */
+ if (isr0_status & PCIE_MSG_PM_PME_MASK)
+ advk_pcie_handle_pme(pcie);
+
+ /* Process ERR interrupt */
+ if (isr0_status & PCIE_ISR0_ERR_MASK) {
+ advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);
+
+ /*
+ * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
+ * PCIe interrupt 0
+ */
+ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
+ }
+
/* Process MSI interrupts */
if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
advk_pcie_handle_msi(pcie);
@@ -1437,28 +1564,50 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
PCIE_ISR1_REG);
- generic_handle_domain_irq(pcie->irq_domain, i);
+ if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
+ (char)i + 'A');
}
}
-static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+static void advk_pcie_irq_handler(struct irq_desc *desc)
{
- struct advk_pcie *pcie = arg;
- u32 status;
+ struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+
+ chained_irq_enter(chip, desc);
+
+ val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+ mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
+ status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
+
+ if (status & PCIE_IRQ_CORE_INT) {
+ advk_pcie_handle_int(pcie);
- status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
- if (!(status & PCIE_IRQ_CORE_INT))
- return IRQ_NONE;
+ /* Clear interrupt */
+ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+ }
- advk_pcie_handle_int(pcie);
+ chained_irq_exit(chip, desc);
+}
- /* Clear interrupt */
- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct advk_pcie *pcie = dev->bus->sysdata;
- return IRQ_HANDLED;
+ /*
+ * Emulated root bridge has its own emulated irq chip and irq domain.
+ * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
+ * hwirq for irq_create_mapping() is indexed from zero.
+ */
+ if (pci_is_root_bus(dev->bus))
+ return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
+ else
+ return of_irq_parse_and_map_pci(dev, slot, pin);
}
-static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
+static void advk_pcie_disable_phy(struct advk_pcie *pcie)
{
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
@@ -1522,7 +1671,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- int ret, irq;
+ int ret;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
if (!bridge)
@@ -1608,17 +1757,9 @@ static int advk_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
- pcie);
- if (ret) {
- dev_err(dev, "Failed to register interrupt\n");
- return ret;
- }
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq < 0)
+ return pcie->irq;
pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
"reset-gpios", 0,
@@ -1667,11 +1808,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
+ ret = advk_pcie_init_rp_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize irq\n");
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+ return ret;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
+
bridge->sysdata = pcie;
bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = advk_pcie_map_irq;
ret = pci_host_probe(bridge);
if (ret < 0) {
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
return ret;
@@ -1719,7 +1873,11 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+ /* Remove IRQ handler */
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+
/* Remove IRQ domains */
+ advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 20ea2ee330b8..2a1481a52489 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -616,6 +616,121 @@ static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
{
return pci_msi_prepare(domain, dev, nvec, info);
}
+
+/**
+ * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * affinity.
+ * @data: Describes the IRQ
+ *
+ * Build new a destination for the MSI and make a hypercall to
+ * update the Interrupt Redirection Table. "Device Logical ID"
+ * is built out of this PCI bus's instance GUID and the function
+ * number of the device.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
+ struct hv_retarget_device_interrupt *params;
+ struct hv_pcibus_device *hbus;
+ struct cpumask *dest;
+ cpumask_var_t tmp;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ u32 var_size = 0;
+ int cpu, nr_bank;
+ u64 res;
+
+ dest = irq_data_get_effective_affinity_mask(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+
+ spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+ params = &hbus->retarget_msi_interrupt_params;
+ memset(params, 0, sizeof(*params));
+ params->partition_id = HV_PARTITION_ID_SELF;
+ params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
+ hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ (hbus->hdev->dev_instance.b[4] << 16) |
+ (hbus->hdev->dev_instance.b[7] << 8) |
+ (hbus->hdev->dev_instance.b[6] & 0xf8) |
+ PCI_FUNC(pdev->devfn);
+ params->int_target.vector = hv_msi_get_int_vector(data);
+
+ /*
+ * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
+ * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+ * spurious interrupt storm. Not doing so does not seem to have a
+ * negative effect (yet?).
+ */
+
+ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ /*
+ * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+ * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+ * with >64 VP support.
+ * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+ * is not sufficient for this hypercall.
+ */
+ params->int_target.flags |=
+ HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+
+ if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
+ res = 1;
+ goto exit_unlock;
+ }
+
+ cpumask_and(tmp, dest, cpu_online_mask);
+ nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
+ free_cpumask_var(tmp);
+
+ if (nr_bank <= 0) {
+ res = 1;
+ goto exit_unlock;
+ }
+
+ /*
+ * var-sized hypercall, var-size starts after vp_mask (thus
+ * vp_set.format does not count, but vp_set.valid_bank_mask
+ * does).
+ */
+ var_size = 1 + nr_bank;
+ } else {
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ params->int_target.vp_mask |=
+ (1ULL << hv_cpu_number_to_vp_number(cpu));
+ }
+ }
+
+ res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+ params, NULL);
+
+exit_unlock:
+ spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+
+ /*
+ * During hibernation, when a CPU is offlined, the kernel tries
+ * to move the interrupt to the remaining CPUs that haven't
+ * been offlined yet. In this case, the below hv_do_hypercall()
+ * always fails since the vmbus channel has been closed:
+ * refer to cpu_disable_common() -> fixup_irqs() ->
+ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
+ *
+ * Suppress the error message for hibernation because the failure
+ * during hibernation does not matter (at this time all the devices
+ * have been frozen). Note: the correct affinity info is still updated
+ * into the irqdata data structure in migrate_one_irq() ->
+ * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
+ * resumes, hv_pci_restore_msi_state() is able to correctly restore
+ * the interrupt with the correct affinity.
+ */
+ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
+ dev_err(&hbus->hdev->device,
+ "%s() failed: %#llx", __func__, res);
+}
#elif defined(CONFIG_ARM64)
/*
* SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
@@ -839,6 +954,12 @@ static struct irq_domain *hv_pci_get_root_domain(void)
{
return hv_msi_gic_irq_domain;
}
+
+/*
+ * SPIs are used for interrupts of PCI devices and SPIs is managed via GICD
+ * registers which Hyper-V already supports, so no hypercall needed.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */
/**
@@ -1456,119 +1577,9 @@ static void hv_irq_mask(struct irq_data *data)
irq_chip_mask_parent(data);
}
-/**
- * hv_irq_unmask() - "Unmask" the IRQ by setting its current
- * affinity.
- * @data: Describes the IRQ
- *
- * Build new a destination for the MSI and make a hypercall to
- * update the Interrupt Redirection Table. "Device Logical ID"
- * is built out of this PCI bus's instance GUID and the function
- * number of the device.
- */
static void hv_irq_unmask(struct irq_data *data)
{
- struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
- struct hv_retarget_device_interrupt *params;
- struct hv_pcibus_device *hbus;
- struct cpumask *dest;
- cpumask_var_t tmp;
- struct pci_bus *pbus;
- struct pci_dev *pdev;
- unsigned long flags;
- u32 var_size = 0;
- int cpu, nr_bank;
- u64 res;
-
- dest = irq_data_get_effective_affinity_mask(data);
- pdev = msi_desc_to_pci_dev(msi_desc);
- pbus = pdev->bus;
- hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
-
- spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
-
- params = &hbus->retarget_msi_interrupt_params;
- memset(params, 0, sizeof(*params));
- params->partition_id = HV_PARTITION_ID_SELF;
- params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
- hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
- params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
- (hbus->hdev->dev_instance.b[4] << 16) |
- (hbus->hdev->dev_instance.b[7] << 8) |
- (hbus->hdev->dev_instance.b[6] & 0xf8) |
- PCI_FUNC(pdev->devfn);
- params->int_target.vector = hv_msi_get_int_vector(data);
-
- /*
- * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
- * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
- * spurious interrupt storm. Not doing so does not seem to have a
- * negative effect (yet?).
- */
-
- if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
- /*
- * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
- * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
- * with >64 VP support.
- * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
- * is not sufficient for this hypercall.
- */
- params->int_target.flags |=
- HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
-
- if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
- res = 1;
- goto exit_unlock;
- }
-
- cpumask_and(tmp, dest, cpu_online_mask);
- nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
- free_cpumask_var(tmp);
-
- if (nr_bank <= 0) {
- res = 1;
- goto exit_unlock;
- }
-
- /*
- * var-sized hypercall, var-size starts after vp_mask (thus
- * vp_set.format does not count, but vp_set.valid_bank_mask
- * does).
- */
- var_size = 1 + nr_bank;
- } else {
- for_each_cpu_and(cpu, dest, cpu_online_mask) {
- params->int_target.vp_mask |=
- (1ULL << hv_cpu_number_to_vp_number(cpu));
- }
- }
-
- res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
- params, NULL);
-
-exit_unlock:
- spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
-
- /*
- * During hibernation, when a CPU is offlined, the kernel tries
- * to move the interrupt to the remaining CPUs that haven't
- * been offlined yet. In this case, the below hv_do_hypercall()
- * always fails since the vmbus channel has been closed:
- * refer to cpu_disable_common() -> fixup_irqs() ->
- * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
- *
- * Suppress the error message for hibernation because the failure
- * during hibernation does not matter (at this time all the devices
- * have been frozen). Note: the correct affinity info is still updated
- * into the irqdata data structure in migrate_one_irq() ->
- * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
- * resumes, hv_pci_restore_msi_state() is able to correctly restore
- * the interrupt with the correct affinity.
- */
- if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
- dev_err(&hbus->hdev->device,
- "%s() failed: %#llx", __func__, res);
+ hv_arch_irq_unmask(data);
if (data->parent_data->chip->irq_unmask)
irq_chip_unmask_parent(data);
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 48169b1e3817..50a8e1d6f70a 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -35,7 +35,7 @@ struct loongson_pci {
/* Fixup wrong class code in PCIe bridges */
static void bridge_class_quirk(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_PCIE_PORT_0, bridge_class_quirk);
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index a75d2b9196f9..24c521d475d9 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -286,7 +286,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
*/
dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
dev_rev &= ~0xffffff00;
- dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+ dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);
/* Point PCIe unit MBUS decode windows to DRAM space. */
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index cb0aa65d6934..0457ec02ab70 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -726,7 +726,7 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port)
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 0d5acbfc7143..77c1fe7e11f9 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -49,7 +49,6 @@
#define EN_REG 0x00000001
#define OB_LO_IO 0x00000002
#define XGENE_PCIE_DEVICEID 0xE004
-#define SZ_1T (SZ_1G*1024ULL)
#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
#define XGENE_V1_PCI_EXP_CAP 0x40
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index 54b6e6d5bc64..99a99900444d 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -18,7 +18,7 @@
/* NS: CLASS field is R/O, and set to wrong 0x200 value */
static void bcma_pcie2_fixup_class(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index b3e75bc61ff1..2519201b0e51 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -789,14 +789,13 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie)
return -EFAULT;
}
- /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
+ /* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */
#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
-#define PCI_CLASS_BRIDGE_MASK 0xffff00
-#define PCI_CLASS_BRIDGE_SHIFT 8
+#define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff
iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
4, &class);
- class &= ~PCI_CLASS_BRIDGE_MASK;
- class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
+ class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK;
+ class |= PCI_CLASS_BRIDGE_PCI_NORMAL;
iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
4, class);
@@ -1581,7 +1580,7 @@ static void quirk_paxc_bridge(struct pci_dev *pdev)
* code that the bridge is not an Ethernet device.
*/
if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
/*
* MPSS is not being set properly (as it is currently 0). This is
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 7705d61fba4c..3e8d70bfabc6 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -292,7 +292,7 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
- val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
+ val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
/* Mask all INTx interrupts */
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 38b6e02edfa9..dfca59c4ae34 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -370,7 +370,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
* class to match. Hardware takes care of propagating the IDSETR
* settings, so there is no need to bother with a quirk.
*/
- rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1);
/*
* Setup Secondary Bus Number & Subordinate Bus Number, even though
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 45a28880f322..7f56f99b4116 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -370,7 +370,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
- PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+ PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
PCIE_RC_CONFIG_RID_CCR);
/* Clear THP cap's next cap pointer to remove L1 substate cap */
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 1650a5087450..32c3a859c26b 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -134,7 +134,6 @@
#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_SCC_SHIFT 16
#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 90d84d3bc868..5b833f00e980 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -285,7 +285,17 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
if (ret)
dev_err(dev, "Data transfer failed\n");
} else {
- memcpy(dst_addr, src_addr, reg->size);
+ void *buf;
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ memcpy_fromio(buf, src_addr, reg->size);
+ memcpy_toio(dst_addr, buf, reg->size);
+ kfree(buf);
}
ktime_get_ts64(&end);
pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
@@ -441,7 +451,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n");
ret = -EINVAL;
- goto err_map_addr;
+ goto err_dma_map;
}
src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 12f4b351be67..6efa3d8db9a5 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -226,9 +226,9 @@ static void acpiphp_post_dock_fixup(struct acpi_device *adev)
static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
void **rv)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct acpiphp_bridge *bridge = data;
struct acpiphp_context *context;
- struct acpi_device *adev;
struct acpiphp_slot *slot;
struct acpiphp_func *newfunc;
acpi_status status = AE_OK;
@@ -238,6 +238,9 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
struct pci_dev *pdev = bridge->pci_dev;
u32 val;
+ if (!adev)
+ return AE_OK;
+
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
@@ -245,8 +248,6 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
"can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index df48b3b03ab4..8f3a0a33f362 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -433,8 +433,9 @@ static int __init ibm_acpiphp_init(void)
goto init_return;
}
pr_debug("%s: found IBM aPCI device\n", __func__);
- if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
- pr_err("%s: acpi_bus_get_device failed\n", __func__);
+ device = acpi_fetch_acpi_dev(ibm_acpi_handle);
+ if (!device) {
+ pr_err("%s: acpi_fetch_acpi_dev failed\n", __func__);
retval = -ENODEV;
goto init_return;
}
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index f99a7927e5a8..c94b40e64baf 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -1254,7 +1254,7 @@ static void __exit unload_cpqphpd(void)
struct pci_resource *res;
struct pci_resource *tres;
- rc = compaq_nvram_store(cpqhp_rom_start);
+ compaq_nvram_store(cpqhp_rom_start);
ctrl = cpqhp_ctrl_list;
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 93fd2a621822..e429ecddc8fe 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -881,7 +881,6 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
u8 reset;
u16 misc;
u32 Diff;
- u32 temp_dword;
misc = readw(ctrl->hpc_reg + MISC);
@@ -917,7 +916,7 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
writel(Diff, ctrl->hpc_reg + INT_INPUT_CLEAR);
/* Read it back to clear any posted writes */
- temp_dword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+ readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
if (!Diff)
/* Clear all interrupts */
@@ -1412,7 +1411,6 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
u32 rc = 0;
struct pci_func *new_slot = NULL;
struct pci_bus *bus = ctrl->pci_bus;
- struct slot *p_slot;
struct resource_lists res_lists;
hp_slot = func->device - ctrl->slot_device_offset;
@@ -1459,7 +1457,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
if (rc)
return rc;
- p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+ cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
/* turn on board and blink green LED */
@@ -1614,7 +1612,6 @@ static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controll
u8 device;
u8 hp_slot;
u8 temp_byte;
- u32 rc;
struct resource_lists res_lists;
struct pci_func *temp_func;
@@ -1629,7 +1626,7 @@ static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controll
/* When we get here, it is safe to change base address registers.
* We will attempt to save the base address register lengths */
if (replace_flag || !ctrl->add_support)
- rc = cpqhp_save_base_addr_length(ctrl, func);
+ cpqhp_save_base_addr_length(ctrl, func);
else if (!func->bus_head && !func->mem_head &&
!func->p_mem_head && !func->io_head) {
/* Here we check to see if we've saved any of the board's
@@ -1647,7 +1644,7 @@ static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controll
}
if (!skip)
- rc = cpqhp_save_used_resources(ctrl, func);
+ cpqhp_save_used_resources(ctrl, func);
}
/* Change status to shutdown */
if (func->is_a_board)
@@ -1767,7 +1764,7 @@ void cpqhp_event_stop_thread(void)
static void interrupt_event_handler(struct controller *ctrl)
{
- int loop = 0;
+ int loop;
int change = 1;
struct pci_func *func;
u8 hp_slot;
@@ -1885,7 +1882,6 @@ static void interrupt_event_handler(struct controller *ctrl)
void cpqhp_pushbutton_thread(struct timer_list *t)
{
u8 hp_slot;
- u8 device;
struct pci_func *func;
struct slot *p_slot = from_timer(p_slot, t, task_event);
struct controller *ctrl = (struct controller *) p_slot->ctrl;
@@ -1893,8 +1889,6 @@ void cpqhp_pushbutton_thread(struct timer_list *t)
pushbutton_pending = NULL;
hp_slot = p_slot->hp_slot;
- device = p_slot->device;
-
if (is_slot_enabled(ctrl, hp_slot)) {
p_slot->state = POWEROFF_STATE;
/* power Down board */
@@ -1951,15 +1945,12 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
u32 tempdword;
int rc;
struct slot *p_slot;
- int physical_slot = 0;
tempdword = 0;
device = func->device;
hp_slot = device - ctrl->slot_device_offset;
p_slot = cpqhp_find_slot(ctrl, device);
- if (p_slot)
- physical_slot = p_slot->number;
/* Check to see if the interlock is closed */
tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -2043,13 +2034,10 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
unsigned int devfn;
struct slot *p_slot;
struct pci_bus *pci_bus = ctrl->pci_bus;
- int physical_slot = 0;
device = func->device;
func = cpqhp_slot_find(ctrl->bus, device, index++);
p_slot = cpqhp_find_slot(ctrl, device);
- if (p_slot)
- physical_slot = p_slot->number;
/* Make sure there are no video controllers here */
while (func && !rc) {
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 9038039ad6db..3b248426a9f4 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -473,7 +473,7 @@ int cpqhp_save_slot_config(struct controller *ctrl, struct pci_func *new_slot)
int sub_bus;
int max_functions;
int function = 0;
- int cloop = 0;
+ int cloop;
int stop_it;
ID = 0xFFFFFFFF;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 508a62a6b5f9..a5720d12e573 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -325,11 +325,9 @@ static u8 i2c_ctrl_write(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
static u8 isa_ctrl_read(struct controller *ctlr_ptr, u8 offset)
{
u16 start_address;
- u16 end_address;
u8 data;
start_address = ctlr_ptr->u.isa_ctlr.io_start;
- end_address = ctlr_ptr->u.isa_ctlr.io_end;
data = inb(start_address + offset);
return data;
}
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index ae9acc77d14f..4a72ade2cddb 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -1955,7 +1955,7 @@ static int __init update_bridge_ranges(struct bus_node **bus)
bus_sec = find_bus_wprev(sec_busno, NULL, 0);
/* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */
if (!bus_sec) {
- bus_sec = alloc_error_bus(NULL, sec_busno, 1);
+ alloc_error_bus(NULL, sec_busno, 1);
/* the rest will be populated during NVRAM call */
return 0;
}
@@ -2114,6 +2114,5 @@ static int __init update_bridge_ranges(struct bus_node **bus)
} /* end for function */
} /* end for device */
- bus = &bus_cur;
return 0;
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1c1ebf3dad43..040ae076ec0e 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -98,6 +98,8 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
if (slot_status & PCI_EXP_SLTSTA_CC) {
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_CC);
+ ctrl->cmd_busy = 0;
+ smp_mb();
return 1;
}
msleep(10);
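Editor's note: the two added lines make the polling path do what the interrupt path already does. Once the Command Completed bit has been seen and acknowledged, ctrl->cmd_busy is cleared and a full barrier publishes that before the next command is issued. A minimal user-space sketch of the pattern, assuming nothing beyond what the hunk shows; the register name, bit value and flag below are stand-ins, not pciehp internals.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint slot_status;         /* stands in for the Slot Status register */
static atomic_bool cmd_busy;            /* stands in for ctrl->cmd_busy */

#define SLTSTA_CC 0x10                  /* illustrative Command Completed bit */

static bool poll_cmd_once(void)
{
        unsigned int status = atomic_load(&slot_status);

        if (status & SLTSTA_CC) {
                /* acknowledge the event (write-one-to-clear on real hardware) */
                atomic_fetch_and(&slot_status, ~SLTSTA_CC);
                /* clear busy with a full barrier, the smp_mb() analogue */
                atomic_store_explicit(&cmd_busy, false, memory_order_seq_cst);
                return true;
        }
        return false;
}

int main(void)
{
        atomic_store(&cmd_busy, true);
        atomic_store(&slot_status, SLTSTA_CC);  /* pretend the hardware completed */
        printf("completed: %d, busy afterwards: %d\n",
               poll_cmd_once(), atomic_load(&cmd_busy));
        return 0;
}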
@@ -1084,6 +1086,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 81a918d47895..53692b048301 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -312,7 +312,7 @@ static void shpc_remove(struct pci_dev *dev)
}
static const struct pci_device_id shpcd_pci_tbl[] = {
- {PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0)},
+ {PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0)},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, shpcd_pci_tbl);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 1015274bd2fe..30b1df3c9d2f 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -321,6 +321,7 @@ static const struct pci_p2pdma_whitelist_entry {
{PCI_VENDOR_ID_INTEL, 0x2032, 0},
{PCI_VENDOR_ID_INTEL, 0x2033, 0},
{PCI_VENDOR_ID_INTEL, 0x2020, 0},
+ {PCI_VENDOR_ID_INTEL, 0x09a2, 0},
{}
};
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a42dbf448860..1f15ab7eabf8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -89,9 +89,9 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
return -ENODEV;
}
- ret = acpi_bus_get_device(handle, &adev);
- if (ret)
- return ret;
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
+ return -ENODEV;
ret = acpi_get_rc_addr(adev, res);
if (ret) {
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index a5b662cc89d0..9c2ca28e3ecf 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -350,10 +350,12 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
/*
- * class_revision: Class is high 24 bits and revision is low 8 bit of this member,
- * while class for PCI Bridge Normal Decode has the 24-bit value: PCI_CLASS_BRIDGE_PCI << 8
+ * class_revision: Class is high 24 bits and revision is low 8 bit
+ * of this member, while class for PCI Bridge Normal Decode has the
+ * 24-bit value: PCI_CLASS_BRIDGE_PCI_NORMAL
*/
- bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8);
+ bridge->conf.class_revision |=
+ cpu_to_le32(PCI_CLASS_BRIDGE_PCI_NORMAL << 8);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
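Editor's note: the emulated bridge keeps class and revision in one 32-bit register, class code in bits 31:8 and revision ID in bits 7:0. PCI_CLASS_BRIDGE_PCI_NORMAL is already the full 24-bit class code (0x060400), so it only needs the single << 8, whereas the old 16-bit PCI_CLASS_BRIDGE_PCI value needed two shifts. A quick stand-alone check of that packing, with the macro values copied here rather than pulled from the kernel headers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CLASS_BRIDGE_PCI         0x0604u    /* 16-bit base class/subclass */
#define CLASS_BRIDGE_PCI_NORMAL  0x060400u  /* 24-bit class incl. prog-if */

int main(void)
{
        uint8_t rev = 0x02;                             /* arbitrary revision */
        uint32_t old_style = ((CLASS_BRIDGE_PCI << 8) << 8) | rev;
        uint32_t new_style = (CLASS_BRIDGE_PCI_NORMAL << 8) | rev;

        assert(old_style == new_style);                 /* same encoding */
        printf("class_revision = 0x%08x (class 0x%06x, rev 0x%02x)\n",
               new_style, new_style >> 8, new_style & 0xff);
        return 0;
}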
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 602f0fb0b007..c263ffc5884a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -754,8 +754,6 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
u8 val;
pci_user_read_config_byte(dev, off, &val);
data[off - init_off] = val;
- off++;
- --size;
}
pci_config_pm_runtime_put(dev);
@@ -818,11 +816,8 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
size -= 2;
}
- if (size) {
+ if (size)
pci_user_write_config_byte(dev, off, data[off - init_off]);
- off++;
- --size;
- }
pci_config_pm_runtime_put(dev);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 45a2ef702b45..788ac8df3f9d 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -43,7 +43,7 @@ config PCIEAER_INJECT
error injection can fake almost all kinds of errors with the
help of a user space helper tool aer-inject, which can be
gotten from:
- https://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
#
# PCI Express ECRC
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 767f8859b99b..2dab275d252f 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -6,7 +6,7 @@
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
- * https://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ * https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
*
* Copyright 2009 Intel Corporation.
* Huang Ying <ying.huang@intel.com>
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 35eca6277a96..4b8801656ffb 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -178,9 +178,9 @@ static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
*/
static const struct pci_device_id port_pci_ids[] = {
/* handle any PCI-Express port */
- { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0) },
/* subtractive decode PCI-to-PCI bridge, class type is 060401h */
- { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, ~0) },
/* handle any Root Complex Event Collector */
{ PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) },
{ },
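Editor's note: both the shpchp and portdrv device tables match on full 24-bit class codes, and the named macros are just spelled-out forms of the open-coded expressions they replace. A tiny self-contained check, with values mirrored from include/linux/pci_ids.h rather than included from it:

#include <assert.h>
#include <stdio.h>

#define PCI_CLASS_BRIDGE_PCI              0x0604
#define PCI_CLASS_BRIDGE_PCI_NORMAL       0x060400
#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE  0x060401

int main(void)
{
        assert(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00) == PCI_CLASS_BRIDGE_PCI_NORMAL);
        assert(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01) == PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE);
        printf("normal=0x%06x subtractive=0x%06x\n",
               PCI_CLASS_BRIDGE_PCI_NORMAL, PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE);
        return 0;
}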
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 9c7edec64f7e..31b26d8ea6cc 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -99,9 +99,7 @@ static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
unsigned char val;
pci_user_read_config_byte(dev, pos, &val);
__put_user(val, buf);
- buf++;
pos++;
- cnt--;
}
pci_config_pm_runtime_put(dev);
@@ -176,9 +174,7 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
unsigned char val;
__get_user(val, buf);
pci_user_write_config_byte(dev, pos, val);
- buf++;
pos++;
- cnt--;
}
pci_config_pm_runtime_put(dev);
@@ -188,10 +184,12 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
return nbytes;
}
+#ifdef HAVE_PCI_MMAP
struct pci_filp_private {
enum pci_mmap_state mmap_state;
int write_combine;
};
+#endif /* HAVE_PCI_MMAP */
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d2dd6a6cda60..5f46fed01e6c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1811,6 +1811,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
+static void quirk_no_msi(struct pci_dev *dev)
+{
+ pci_info(dev, "avoiding MSI to work around a hardware defect\n");
+ dev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi);
+
static void quirk_pcie_mch(struct pci_dev *pdev)
{
pdev->no_msi = 1;
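Editor's note: quirk_no_msi() tags the listed ATI devices so MSI setup is refused for them and drivers fall back to line-based interrupts; the pci_info() line makes that fallback visible in the log. The sketch below shows the general shape of such a table-driven quirk in plain user-space C. The struct and function names are invented; only the vendor/device IDs come from the hunk (0x1002 is the ATI vendor ID).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pci_dev { uint16_t vendor, device; bool no_msi; };

static const struct { uint16_t vendor, device; } no_msi_tbl[] = {
        { 0x1002, 0x4386 }, { 0x1002, 0x4387 }, { 0x1002, 0x4388 },
        { 0x1002, 0x4389 }, { 0x1002, 0x438a }, { 0x1002, 0x438b },
};

static void apply_no_msi_quirk(struct fake_pci_dev *dev)
{
        for (size_t i = 0; i < sizeof(no_msi_tbl) / sizeof(no_msi_tbl[0]); i++) {
                if (dev->vendor == no_msi_tbl[i].vendor &&
                    dev->device == no_msi_tbl[i].device) {
                        dev->no_msi = true;
                        printf("%04x:%04x: avoiding MSI\n",
                               dev->vendor, dev->device);
                        return;
                }
        }
}

int main(void)
{
        struct fake_pci_dev dev = { .vendor = 0x1002, .device = 0x4388 };
        apply_no_msi_quirk(&dev);
        return dev.no_msi ? 0 : 1;
}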
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 547396ec50b5..8cb68e6f6ef9 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -994,7 +994,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
{
struct pci_dev *dev;
resource_size_t min_align, align, size, size0, size1;
- resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
+ resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
int order, max_order;
struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
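Editor's note: the widened array is plain arithmetic. Assuming the indexing scheme in the surrounding, unchanged code, entry i in aligns[] corresponds to an alignment of 1 MiB << i, so 18 entries stop at 2^37 = 128 GiB and 24 entries reach 2^43 = 8 TiB, matching the updated comment. A one-file check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t one_mib = 1ULL << 20;

        printf("aligns[18] covers up to %llu GiB\n",
               (unsigned long long)((one_mib << 17) >> 30));    /* 128 GiB */
        printf("aligns[24] covers up to %llu TiB\n",
               (unsigned long long)((one_mib << 23) >> 40));    /* 8 TiB */
        return 0;
}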
@@ -1525,7 +1525,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
{
struct pci_dev *dev = bus->self;
struct resource *r;
- unsigned int old_flags = 0;
+ unsigned int old_flags;
struct resource *b_res;
int idx = 1;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/pci/vgaarb.c
index 569930552957..f80b6ec88dc3 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/pci/vgaarb.c
@@ -1,32 +1,11 @@
+// SPDX-License-Identifier: MIT
/*
* vgaarb.c: Implements the VGA arbitration. For details refer to
* Documentation/gpu/vgaarbiter.rst
*
- *
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS
- * IN THE SOFTWARE.
- *
*/
#define pr_fmt(fmt) "vgaarb: " fmt
@@ -72,6 +51,7 @@ struct vga_device {
unsigned int io_norm_cnt; /* normal IO count */
unsigned int mem_norm_cnt; /* normal MEM count */
bool bridge_has_one_vga;
+ bool is_firmware_default; /* device selected by firmware */
unsigned int (*set_decode)(struct pci_dev *pdev, bool decode);
};
@@ -122,8 +102,6 @@ both:
/* this is only used a cookie - it should not be dereferenced */
static struct pci_dev *vga_default;
-static void vga_arb_device_card_gone(struct pci_dev *pdev);
-
/* Find somebody in our list */
static struct vga_device *vgadev_find(struct pci_dev *pdev)
{
@@ -565,6 +543,144 @@ bail:
}
EXPORT_SYMBOL(vga_put);
+static bool vga_is_firmware_default(struct pci_dev *pdev)
+{
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ u64 base = screen_info.lfb_base;
+ u64 size = screen_info.lfb_size;
+ u64 limit;
+ resource_size_t start, end;
+ unsigned long flags;
+ int i;
+
+ /* Select the device owning the boot framebuffer if there is one */
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)screen_info.ext_lfb_base << 32;
+
+ limit = base + size;
+
+ /* Does firmware framebuffer belong to us? */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ flags = pci_resource_flags(pdev, i);
+
+ if ((flags & IORESOURCE_MEM) == 0)
+ continue;
+
+ start = pci_resource_start(pdev, i);
+ end = pci_resource_end(pdev, i);
+
+ if (!start || !end)
+ continue;
+
+ if (base < start || limit >= end)
+ continue;
+
+ return true;
+ }
+#endif
+ return false;
+}
+
+static bool vga_arb_integrated_gpu(struct device *dev)
+{
+#if defined(CONFIG_ACPI)
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ return adev && !strcmp(acpi_device_hid(adev), ACPI_VIDEO_HID);
+#else
+ return false;
+#endif
+}
+
+/*
+ * Return true if vgadev is a better default VGA device than the best one
+ * we've seen so far.
+ */
+static bool vga_is_boot_device(struct vga_device *vgadev)
+{
+ struct vga_device *boot_vga = vgadev_find(vga_default_device());
+ struct pci_dev *pdev = vgadev->pdev;
+ u16 cmd, boot_cmd;
+
+ /*
+ * We select the default VGA device in this order:
+ * Firmware framebuffer (see vga_arb_select_default_device())
+ * Legacy VGA device (owns VGA_RSRC_LEGACY_MASK)
+ * Non-legacy integrated device (see vga_arb_select_default_device())
+ * Non-legacy discrete device (see vga_arb_select_default_device())
+ * Other device (see vga_arb_select_default_device())
+ */
+
+ /*
+ * We always prefer a firmware default device, so if we've already
+ * found one, there's no need to consider vgadev.
+ */
+ if (boot_vga && boot_vga->is_firmware_default)
+ return false;
+
+ if (vga_is_firmware_default(pdev)) {
+ vgadev->is_firmware_default = true;
+ return true;
+ }
+
+ /*
+ * A legacy VGA device has MEM and IO enabled and any bridges
+ * leading to it have PCI_BRIDGE_CTL_VGA enabled so the legacy
+ * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], etc) are
+ * routed to it.
+ *
+ * We use the first one we find, so if we've already found one,
+ * vgadev is no better.
+ */
+ if (boot_vga &&
+ (boot_vga->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)
+ return false;
+
+ if ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)
+ return true;
+
+ /*
+ * If we haven't found a legacy VGA device, accept a non-legacy
+ * device. It may have either IO or MEM enabled, and bridges may
+ * not have PCI_BRIDGE_CTL_VGA enabled, so it may not be able to
+ * use legacy VGA resources. Prefer an integrated GPU over others.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+
+ /*
+ * An integrated GPU overrides a previous non-legacy
+ * device. We expect only a single integrated GPU, but if
+ * there are more, we use the *last* because that was the
+ * previous behavior.
+ */
+ if (vga_arb_integrated_gpu(&pdev->dev))
+ return true;
+
+ /*
+ * We prefer the first non-legacy discrete device we find.
+ * If we already found one, vgadev is no better.
+ */
+ if (boot_vga) {
+ pci_read_config_word(boot_vga->pdev, PCI_COMMAND,
+ &boot_cmd);
+ if (boot_cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ return false;
+ }
+ return true;
+ }
+
+ /*
+ * vgadev has neither IO nor MEM enabled. If we haven't found any
+ * other VGA devices, it is the best candidate so far.
+ */
+ if (!boot_vga)
+ return true;
+
+ return false;
+}
+
/*
* Rules for using a bridge to control a VGA descendant decoding: if a bridge
* has only one VGA descendant then it can be used to control the VGA routing
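Editor's note: vga_is_boot_device() replaces the old "first legacy owner wins" rule with the ordered preference spelled out in its comment, and vga_is_firmware_default() implements the first rung by testing whether the firmware framebuffer described by screen_info sits inside one of the device's memory BARs. The same check used to live in vga_arb_select_default_device(), which the later hunks delete now that the decision is made as each device is added. A small sketch of the containment test, with invented addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool fb_inside_bar(uint64_t base, uint64_t size,
                          uint64_t bar_start, uint64_t bar_end)
{
        uint64_t limit = base + size;

        if (!bar_start || !bar_end)
                return false;
        return base >= bar_start && limit < bar_end;
}

int main(void)
{
        /* a 16 MiB framebuffer at 0xe0000000, tested against two BARs */
        printf("%d\n", fb_inside_bar(0xe0000000ULL, 16 << 20,
                                     0xd0000000ULL, 0xdfffffffULL));    /* 0 */
        printf("%d\n", fb_inside_bar(0xe0000000ULL, 16 << 20,
                                     0xe0000000ULL, 0xefffffffULL));    /* 1 */
        return 0;
}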
@@ -582,8 +698,10 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
vgadev->bridge_has_one_vga = true;
- if (list_empty(&vga_list))
+ if (list_empty(&vga_list)) {
+ vgaarb_info(&vgadev->pdev->dev, "bridge control possible\n");
return;
+ }
/* okay, iterate the new device's bridge hierarchy */
new_bus = vgadev->pdev->bus;
@@ -622,6 +740,11 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
}
new_bus = new_bus->parent;
}
+
+ if (vgadev->bridge_has_one_vga)
+ vgaarb_info(&vgadev->pdev->dev, "bridge control possible\n");
+ else
+ vgaarb_info(&vgadev->pdev->dev, "no bridge control possible\n");
}
/*
@@ -692,12 +815,10 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
bus = bus->parent;
}
- /* Deal with VGA default device. Use first enabled one
- * by default if arch doesn't have it's own hook
- */
- if (vga_default == NULL &&
- ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
- vgaarb_info(&pdev->dev, "setting as boot VGA device\n");
+ if (vga_is_boot_device(vgadev)) {
+ vgaarb_info(&pdev->dev, "setting as boot VGA device%s\n",
+ vga_default_device() ?
+ " (overriding previous)" : "");
vga_set_default_device(pdev);
}
@@ -741,10 +862,6 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
/* Remove entry from list */
list_del(&vgadev->list);
vga_count--;
- /* Notify userland driver that the device is gone so it discards
- * it's copies of the pci_dev pointer
- */
- vga_arb_device_card_gone(pdev);
/* Wake up all possible waiters */
wake_up_all(&vga_wait_queue);
@@ -994,9 +1111,7 @@ static ssize_t vga_arb_read(struct file *file, char __user *buf,
if (lbuf == NULL)
return -ENOMEM;
- /* Shields against vga_arb_device_card_gone (pci_dev going
- * away), and allows access to vga list
- */
+ /* Protects vga_list */
spin_lock_irqsave(&vga_lock, flags);
/* If we are targeting the default, use it */
@@ -1013,8 +1128,6 @@ static ssize_t vga_arb_read(struct file *file, char __user *buf,
/* Wow, it's not in the list, that shouldn't happen,
* let's fix us up and return invalid card
*/
- if (pdev == priv->target)
- vga_arb_device_card_gone(pdev);
spin_unlock_irqrestore(&vga_lock, flags);
len = sprintf(lbuf, "invalid");
goto done;
@@ -1022,7 +1135,7 @@ static ssize_t vga_arb_read(struct file *file, char __user *buf,
/* Fill the buffer with infos */
len = snprintf(lbuf, 1024,
- "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
+ "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%u:%u)\n",
vga_decode_count, pci_name(pdev),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
@@ -1358,10 +1471,6 @@ static int vga_arb_release(struct inode *inode, struct file *file)
return 0;
}
-static void vga_arb_device_card_gone(struct pci_dev *pdev)
-{
-}
-
/*
* callback any registered clients to let them know we have a
* change in VGA cards
@@ -1430,111 +1539,10 @@ static struct miscdevice vga_arb_device = {
MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
};
-#if defined(CONFIG_ACPI)
-static bool vga_arb_integrated_gpu(struct device *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
-
- return adev && !strcmp(acpi_device_hid(adev), ACPI_VIDEO_HID);
-}
-#else
-static bool vga_arb_integrated_gpu(struct device *dev)
-{
- return false;
-}
-#endif
-
-static void __init vga_arb_select_default_device(void)
-{
- struct pci_dev *pdev, *found = NULL;
- struct vga_device *vgadev;
-
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- u64 base = screen_info.lfb_base;
- u64 size = screen_info.lfb_size;
- u64 limit;
- resource_size_t start, end;
- unsigned long flags;
- int i;
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)screen_info.ext_lfb_base << 32;
-
- limit = base + size;
-
- list_for_each_entry(vgadev, &vga_list, list) {
- struct device *dev = &vgadev->pdev->dev;
- /*
- * Override vga_arbiter_add_pci_device()'s I/O based detection
- * as it may take the wrong device (e.g. on Apple system under
- * EFI).
- *
- * Select the device owning the boot framebuffer if there is
- * one.
- */
-
- /* Does firmware framebuffer belong to us? */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- flags = pci_resource_flags(vgadev->pdev, i);
-
- if ((flags & IORESOURCE_MEM) == 0)
- continue;
-
- start = pci_resource_start(vgadev->pdev, i);
- end = pci_resource_end(vgadev->pdev, i);
-
- if (!start || !end)
- continue;
-
- if (base < start || limit >= end)
- continue;
-
- if (!vga_default_device())
- vgaarb_info(dev, "setting as boot device\n");
- else if (vgadev->pdev != vga_default_device())
- vgaarb_info(dev, "overriding boot device\n");
- vga_set_default_device(vgadev->pdev);
- }
- }
-#endif
-
- if (!vga_default_device()) {
- list_for_each_entry_reverse(vgadev, &vga_list, list) {
- struct device *dev = &vgadev->pdev->dev;
- u16 cmd;
-
- pdev = vgadev->pdev;
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
- found = pdev;
- if (vga_arb_integrated_gpu(dev))
- break;
- }
- }
- }
-
- if (found) {
- vgaarb_info(&found->dev, "setting as boot device (VGA legacy resources not available)\n");
- vga_set_default_device(found);
- return;
- }
-
- if (!vga_default_device()) {
- vgadev = list_first_entry_or_null(&vga_list,
- struct vga_device, list);
- if (vgadev) {
- struct device *dev = &vgadev->pdev->dev;
- vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
- vga_set_default_device(vgadev->pdev);
- }
- }
-}
-
static int __init vga_arb_device_init(void)
{
int rc;
struct pci_dev *pdev;
- struct vga_device *vgadev;
rc = misc_register(&vga_arb_device);
if (rc < 0)
@@ -1550,18 +1558,7 @@ static int __init vga_arb_device_init(void)
PCI_ANY_ID, pdev)) != NULL)
vga_arbiter_add_pci_device(pdev);
- list_for_each_entry(vgadev, &vga_list, list) {
- struct device *dev = &vgadev->pdev->dev;
-
- if (vgadev->bridge_has_one_vga)
- vgaarb_info(dev, "bridge control possible\n");
- else
- vgaarb_info(dev, "no bridge control possible\n");
- }
-
- vga_arb_select_default_device();
-
pr_info("loaded\n");
return rc;
}
-subsys_initcall(vga_arb_device_init);
+subsys_initcall_sync(vga_arb_device_init);