99 files changed, 4201 insertions, 1186 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d3814789304f..254d8a369f32 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3465,12 +3465,13 @@ specify the device is described above. If <order of align> is not specified, PAGE_SIZE is used as alignment. - PCI-PCI bridge can be specified, if resource + A PCI-PCI bridge can be specified if resource windows need to be expanded. To specify the alignment for several instances of a device, the PCI vendor, device, subvendor, and subdevice may be - specified, e.g., 4096@pci:8086:9c22:103c:198f + specified, e.g., 12@pci:8086:9c22:103c:198f + for 4096-byte alignment. ecrc= Enable/disable PCIe ECRC (transaction layer end-to-end CRC checking). bios: Use BIOS/firmware settings. This is the diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt index 5561a1c060d0..78494c4050f7 100644 --- a/Documentation/devicetree/bindings/pci/designware-pcie.txt +++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt @@ -11,7 +11,6 @@ Required properties: the ATU address space. (The old way of getting the configuration address space from "ranges" is deprecated and should be avoided.) -- num-lanes: number of lanes to use RC mode: - #address-cells: set to <3> - #size-cells: set to <2> @@ -34,6 +33,11 @@ Optional properties: - clock-names: Must include the following entries: - "pcie" - "pcie_bus" +- snps,enable-cdm-check: This is a boolean property and if present enables + automatic checking of CDM (Configuration Dependent Module) registers + for data corruption. CDM registers include standard PCIe configuration + space registers, Port Logic registers, DMA and iATU (internal Address + Translation Unit) registers. RC mode: - num-viewport: number of view ports configured in hardware. If a platform does not specify it, the driver assumes 2. diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt index a7f5f5afa0e6..de4b2baf91e8 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt @@ -50,7 +50,7 @@ Additional required properties for imx7d-pcie and imx8mq-pcie: - power-domains: Must be set to a phandle pointing to PCIE_PHY power domain - resets: Must contain phandles to PCIe-related reset lines exposed by SRC IP block -- reset-names: Must contain the following entires: +- reset-names: Must contain the following entries: - "pciephy" - "apps" - "turnoff" diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt index 92437a366e5f..7468d666763a 100644 --- a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt +++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt @@ -6,6 +6,7 @@ Required properties: "mediatek,mt2712-pcie" "mediatek,mt7622-pcie" "mediatek,mt7623-pcie" + "mediatek,mt7629-pcie" - device_type: Must be "pci" - reg: Base addresses and lengths of the PCIe subsys and root ports. - reg-names: Names of the above areas to use during resource lookup. 
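As the kernel-parameters.txt hunk above clarifies, the value before '@' in resource_alignment is an order of alignment, not a byte count. For illustration, the resulting kernel command line, reusing the vendor/device/subvendor/subdevice IDs from the example in that hunk:

    pci=resource_alignment=12@pci:8086:9c22:103c:198f

i.e. 2^12 = 4096-byte alignment is requested for every matching instance of that device.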
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt new file mode 100644 index 000000000000..b739f92da58e --- /dev/null +++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt @@ -0,0 +1,171 @@ +NVIDIA Tegra PCIe controller (Synopsys DesignWare Core based) + +This PCIe host controller is based on the Synopsis Designware PCIe IP +and thus inherits all the common properties defined in designware-pcie.txt. + +Required properties: +- compatible: For Tegra19x, must contain "nvidia,tegra194-pcie". +- device_type: Must be "pci" +- power-domains: A phandle to the node that controls power to the respective + PCIe controller and a specifier name for the PCIe controller. Following are + the specifiers for the different PCIe controllers + TEGRA194_POWER_DOMAIN_PCIEX8B: C0 + TEGRA194_POWER_DOMAIN_PCIEX1A: C1 + TEGRA194_POWER_DOMAIN_PCIEX1A: C2 + TEGRA194_POWER_DOMAIN_PCIEX1A: C3 + TEGRA194_POWER_DOMAIN_PCIEX4A: C4 + TEGRA194_POWER_DOMAIN_PCIEX8A: C5 + these specifiers are defined in + "include/dt-bindings/power/tegra194-powergate.h" file. +- reg: A list of physical base address and length pairs for each set of + controller registers. Must contain an entry for each entry in the reg-names + property. +- reg-names: Must include the following entries: + "appl": Controller's application logic registers + "config": As per the definition in designware-pcie.txt + "atu_dma": iATU and DMA registers. This is where the iATU (internal Address + Translation Unit) registers of the PCIe core are made available + for SW access. + "dbi": The aperture where root port's own configuration registers are + available +- interrupts: A list of interrupt outputs of the controller. Must contain an + entry for each entry in the interrupt-names property. +- interrupt-names: Must include the following entries: + "intr": The Tegra interrupt that is asserted for controller interrupts + "msi": The Tegra interrupt that is asserted when an MSI is received +- bus-range: Range of bus numbers associated with this controller +- #address-cells: Address representation for root ports (must be 3) + - cell 0 specifies the bus and device numbers of the root port: + [23:16]: bus number + [15:11]: device number + - cell 1 denotes the upper 32 address bits and should be 0 + - cell 2 contains the lower 32 address bits and is used to translate to the + CPU address space +- #size-cells: Size representation for root ports (must be 2) +- ranges: Describes the translation of addresses for root ports and standard + PCI regions. The entries must be 7 cells each, where the first three cells + correspond to the address as described for the #address-cells property + above, the fourth and fifth cells are for the physical CPU address to + translate to and the sixth and seventh cells are as described for the + #size-cells property above. + - Entries setup the mapping for the standard I/O, memory and + prefetchable PCI regions. The first cell determines the type of region + that is setup: + - 0x81000000: I/O memory region + - 0x82000000: non-prefetchable memory region + - 0xc2000000: prefetchable memory region + Please refer to the standard PCI bus binding document for a more detailed + explanation. +- #interrupt-cells: Size representation for interrupts (must be 1) +- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties + Please refer to the standard PCI bus binding document for a more detailed + explanation. 
+- clocks: Must contain an entry for each entry in clock-names. + See ../clocks/clock-bindings.txt for details. +- clock-names: Must include the following entries: + - core +- resets: Must contain an entry for each entry in reset-names. + See ../reset/reset.txt for details. +- reset-names: Must include the following entries: + - apb + - core +- phys: Must contain a phandle to P2U PHY for each entry in phy-names. +- phy-names: Must include an entry for each active lane. + "p2u-N": where N ranges from 0 to one less than the total number of lanes +- nvidia,bpmp: Must contain a pair of phandle to BPMP controller node followed + by controller-id. Following are the controller ids for each controller. + 0: C0 + 1: C1 + 2: C2 + 3: C3 + 4: C4 + 5: C5 +- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals + +Optional properties: +- pinctrl-names: A list of pinctrl state names. + It is mandatory for C5 controller and optional for other controllers. + - "default": Configures PCIe I/O for proper operation. +- pinctrl-0: phandle for the 'default' state of pin configuration. + It is mandatory for C5 controller and optional for other controllers. +- supports-clkreq: Refer to Documentation/devicetree/bindings/pci/pci.txt +- nvidia,update-fc-fixup: This is a boolean property and needs to be present to + improve performance when a platform is designed in such a way that it + satisfies at least one of the following conditions thereby enabling root + port to exchange optimum number of FC (Flow Control) credits with + downstream devices + 1. If C0/C4/C5 run at x1/x2 link widths (irrespective of speed and MPS) + 2. If C0/C1/C2/C3/C4/C5 operate at their respective max link widths and + a) speed is Gen-2 and MPS is 256B + b) speed is >= Gen-3 with any MPS +- nvidia,aspm-cmrt-us: Common Mode Restore Time for proper operation of ASPM + to be specified in microseconds +- nvidia,aspm-pwr-on-t-us: Power On time for proper operation of ASPM to be + specified in microseconds +- nvidia,aspm-l0s-entrance-latency-us: ASPM L0s entrance latency to be + specified in microseconds +- vpcie3v3-supply: A phandle to the regulator node that supplies 3.3V to the slot + if the platform has one such slot. (Ex:- x16 slot owned by C5 controller + in p2972-0000 platform). +- vpcie12v-supply: A phandle to the regulator node that supplies 12V to the slot + if the platform has one such slot. (Ex:- x16 slot owned by C5 controller + in p2972-0000 platform). 
+ +Examples: +========= + +Tegra194: +-------- + + pcie@14180000 { + compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>; + reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */ + 0x00 0x38040000 0x0 0x00040000>; /* iATU_DMA reg space (256K) */ + reg-names = "appl", "config", "atu_dma"; + + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; + num-lanes = <8>; + linux,pci-domain = <0>; + + pinctrl-names = "default"; + pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>; + + clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>; + clock-names = "core"; + + resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>, + <&bpmp TEGRA194_RESET_PEX0_CORE_0>; + reset-names = "apb", "core"; + + interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */ + <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */ + interrupt-names = "intr", "msi"; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; + + nvidia,bpmp = <&bpmp 0>; + + supports-clkreq; + nvidia,aspm-cmrt-us = <60>; + nvidia,aspm-pwr-on-t-us = <20>; + nvidia,aspm-l0s-entrance-latency-us = <3>; + + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000 /* downstream I/O (1MB) */ + 0x82000000 0x0 0x38200000 0x0 0x38200000 0x0 0x01E00000 /* non-prefetchable memory (30MB) */ + 0xc2000000 0x18 0x00000000 0x18 0x00000000 0x4 0x00000000>; /* prefetchable memory (16GB) */ + + vddio-pex-ctl-supply = <&vdd_1v8ao>; + vpcie3v3-supply = <&vdd_3v3_pcie>; + vpcie12v-supply = <&vdd_12v_pcie>; + + phys = <&p2u_hsio_2>, <&p2u_hsio_3>, <&p2u_hsio_4>, + <&p2u_hsio_5>; + phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3"; + }; diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt index 8324a4ee6f06..7a813d0e6d63 100644 --- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt +++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt @@ -11,7 +11,7 @@ Required properties: - reg-names: - "ctrl" for the control register region - "config" for the config space region -- interrupts: Interrupt specifier for the PCIe controler +- interrupts: Interrupt specifier for the PCIe controller - clocks: reference to the PCIe controller clocks - clock-names: mandatory if there is a second clock, in this case the name must be "core" for the first clock and "reg" for the second diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt index 2a5d91024059..29bcbd88f457 100644 --- a/Documentation/devicetree/bindings/pci/pci.txt +++ b/Documentation/devicetree/bindings/pci/pci.txt @@ -27,6 +27,11 @@ driver implementation may support the following properties: - reset-gpios: If present this property specifies PERST# GPIO. Host drivers can parse the GPIO and apply fundamental reset to endpoints. +- supports-clkreq: + If present this property specifies that CLKREQ signal routing exists from + root port to downstream device and host bridge drivers can do programming + which depends on CLKREQ signal existence. For example, programming root port + not to advertise ASPM L1 Sub-States support if there is no CLKREQ signal. 
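The Tegra194 binding example earlier in this diff shows the DT side of the new supports-clkreq flag. As an illustrative sketch only (the helper name below is hypothetical and not taken from these patches), a host bridge driver consuming the flag to decide whether to advertise ASPM L1 Substates might look like:

    #include <linux/of.h>

    /*
     * Illustrative sketch: report whether CLKREQ# is actually routed to the
     * downstream device, based on "supports-clkreq" in the controller node.
     * A driver would use this to avoid advertising L1 Substates otherwise.
     */
    static bool example_port_has_clkreq(const struct device_node *np)
    {
            return of_property_read_bool(np, "supports-clkreq");
    }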
PCI-PCI Bridge properties ------------------------- diff --git a/Documentation/devicetree/bindings/pci/pcie-al.txt b/Documentation/devicetree/bindings/pci/pcie-al.txt new file mode 100644 index 000000000000..557a5089229d --- /dev/null +++ b/Documentation/devicetree/bindings/pci/pcie-al.txt @@ -0,0 +1,46 @@ +* Amazon Annapurna Labs PCIe host bridge + +Amazon's Annapurna Labs PCIe Host Controller is based on the Synopsys DesignWare +PCI core. It inherits common properties defined in +Documentation/devicetree/bindings/pci/designware-pcie.txt. + +Properties of the host controller node that differ from it are: + +- compatible: + Usage: required + Value type: <stringlist> + Definition: Value should contain + - "amazon,al-alpine-v2-pcie" for alpine_v2 + - "amazon,al-alpine-v3-pcie" for alpine_v3 + +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: Register ranges as listed in the reg-names property + +- reg-names: + Usage: required + Value type: <stringlist> + Definition: Must include the following entries + - "config" PCIe ECAM space + - "controller" AL proprietary registers + - "dbi" Designware PCIe registers + +Example: + + pcie-external0: pcie@fb600000 { + compatible = "amazon,al-alpine-v3-pcie"; + reg = <0x0 0xfb600000 0x0 0x00100000 + 0x0 0xfd800000 0x0 0x00010000 + 0x0 0xfd810000 0x0 0x00001000>; + reg-names = "config", "controller", "dbi"; + bus-range = <0 255>; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map-mask = <0x00 0 0 7>; + interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; /* INTa */ + ranges = <0x02000000 0x0 0xc0010000 0x0 0xc0010000 0x0 0x07ff0000>; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt new file mode 100644 index 000000000000..d23ff90baad5 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt @@ -0,0 +1,28 @@ +NVIDIA Tegra194 P2U binding + +Tegra194 has two PHY bricks namely HSIO (High Speed IO) and NVHS (NVIDIA High +Speed) each interfacing with 12 and 8 P2U instances respectively. +A P2U instance is a glue logic between Synopsys DesignWare Core PCIe IP's PIPE +interface and PHY of HSIO/NVHS bricks. Each P2U instance represents one PCIe +lane. + +Required properties: +- compatible: For Tegra19x, must contain "nvidia,tegra194-p2u". +- reg: Should be the physical address space and length of respective each P2U + instance. +- reg-names: Must include the entry "ctl". + +Required properties for PHY port node: +- #phy-cells: Defined by generic PHY bindings. Must be 0. + +Refer to phy/phy-bindings.txt for the generic PHY binding properties. 
+ +Example: + +p2u_hsio_0: phy@3e10000 { + compatible = "nvidia,tegra194-p2u"; + reg = <0x03e10000 0x10000>; + reg-names = "ctl"; + + #phy-cells = <0>; +}; diff --git a/MAINTAINERS b/MAINTAINERS index 7ea0c11b8e8d..6ee4bb5f6d30 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12580,16 +12580,18 @@ F: arch/x86/kernel/early-quirks.c PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +R: Andrew Murray <andrew.murray@arm.com> L: linux-pci@vger.kernel.org Q: http://patchwork.ozlabs.org/project/linux-pci/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/ S: Supported F: drivers/pci/controller/ -PCIE DRIVER FOR ANNAPURNA LABS +PCIE DRIVER FOR AMAZON ANNAPURNA LABS M: Jonathan Chocron <jonnyc@amazon.com> L: linux-pci@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/pci/pcie-al.txt F: drivers/pci/controller/dwc/pcie-al.c PCIE DRIVER FOR AMLOGIC MESON diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 464df4290ffc..2f6977ada447 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -874,7 +874,6 @@ #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - num-lanes = <4>; num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -899,7 +898,6 @@ #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - num-lanes = <4>; num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi index 124a7e2d8442..337919366dc8 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi @@ -486,7 +486,6 @@ #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - num-lanes = <4>; num-viewport = <2>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index 71d9ed9ff985..c084c7a4b6a6 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -677,7 +677,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <4>; num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -704,7 +703,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <2>; num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -731,7 +729,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <2>; num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index b0ef08b090dd..d4c1da3d4bde 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -649,7 +649,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <4>; num-viewport = <8>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -671,7 +670,6 @@ reg-names = "regs", "addr_space"; 
num-ib-windows = <6>; num-ob-windows = <8>; - num-lanes = <2>; status = "disabled"; }; @@ -687,7 +685,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <2>; num-viewport = <8>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -709,7 +706,6 @@ reg-names = "regs", "addr_space"; num-ib-windows = <6>; num-ob-windows = <8>; - num-lanes = <2>; status = "disabled"; }; @@ -725,7 +721,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <2>; num-viewport = <8>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -747,7 +742,6 @@ reg-names = "regs", "addr_space"; num-ib-windows = <6>; num-ob-windows = <8>; - num-lanes = <2>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi index d1469b0747c7..c676d0771762 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi @@ -469,7 +469,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <4>; num-viewport = <256>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -495,7 +494,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <4>; num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ @@ -521,7 +519,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <8>; num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi index 64101c9962ce..7a0be8eaa84a 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi @@ -639,7 +639,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <4>; num-viewport = <6>; bus-range = <0x0 0xff>; msi-parent = <&its>; @@ -661,7 +660,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <4>; num-viewport = <6>; bus-range = <0x0 0xff>; msi-parent = <&its>; @@ -683,7 +681,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <8>; num-viewport = <256>; bus-range = <0x0 0xff>; msi-parent = <&its>; @@ -705,7 +702,6 @@ #size-cells = <2>; device_type = "pci"; dma-coherent; - num-lanes = <4>; num-viewport = <6>; bus-range = <0x0 0xff>; msi-parent = <&its>; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi index 62e07e1197cc..4c38426a6969 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi @@ -289,5 +289,29 @@ gpio = <&gpio TEGRA194_MAIN_GPIO(A, 3) GPIO_ACTIVE_HIGH>; enable-active-high; }; + + vdd_3v3_pcie: regulator@2 { + compatible = "regulator-fixed"; + reg = <2>; + + regulator-name = "PEX_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio TEGRA194_MAIN_GPIO(Z, 2) GPIO_ACTIVE_HIGH>; + regulator-boot-on; + enable-active-high; + }; + + vdd_12v_pcie: regulator@3 { + compatible = "regulator-fixed"; + reg = <3>; + + regulator-name = "VDD_12V"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>; + 
regulator-boot-on; + enable-active-low; + }; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts index 23597d53c9c9..d47cd8c4dd24 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts @@ -93,9 +93,11 @@ }; pcie@141a0000 { - status = "disabled"; + status = "okay"; vddio-pex-ctl-supply = <&vdd_1v8ao>; + vpcie3v3-supply = <&vdd_3v3_pcie>; + vpcie12v-supply = <&vdd_12v_pcie>; phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>, diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index adebbbf36bd0..3c0cf54f0aab 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -3,8 +3,9 @@ #include <dt-bindings/gpio/tegra194-gpio.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/mailbox/tegra186-hsp.h> -#include <dt-bindings/reset/tegra194-reset.h> +#include <dt-bindings/pinctrl/pinctrl-tegra.h> #include <dt-bindings/power/tegra194-powergate.h> +#include <dt-bindings/reset/tegra194-reset.h> #include <dt-bindings/thermal/tegra194-bpmp-thermal.h> / { @@ -130,6 +131,38 @@ }; }; + pinmux: pinmux@2430000 { + compatible = "nvidia,tegra194-pinmux"; + reg = <0x2430000 0x17000 + 0xc300000 0x4000>; + + status = "okay"; + + pex_rst_c5_out_state: pex_rst_c5_out { + pex_rst { + nvidia,pins = "pex_l5_rst_n_pgg1"; + nvidia,schmitt = <TEGRA_PIN_DISABLE>; + nvidia,lpdr = <TEGRA_PIN_ENABLE>; + nvidia,enable-input = <TEGRA_PIN_DISABLE>; + nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>; + nvidia,tristate = <TEGRA_PIN_DISABLE>; + nvidia,pull = <TEGRA_PIN_PULL_NONE>; + }; + }; + + clkreq_c5_bi_dir_state: clkreq_c5_bi_dir { + clkreq { + nvidia,pins = "pex_l5_clkreq_n_pgg0"; + nvidia,schmitt = <TEGRA_PIN_DISABLE>; + nvidia,lpdr = <TEGRA_PIN_ENABLE>; + nvidia,enable-input = <TEGRA_PIN_ENABLE>; + nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>; + nvidia,tristate = <TEGRA_PIN_DISABLE>; + nvidia,pull = <TEGRA_PIN_PULL_NONE>; + }; + }; + }; + uarta: serial@3100000 { compatible = "nvidia,tegra194-uart", "nvidia,tegra20-uart"; reg = <0x03100000 0x40>; @@ -1365,6 +1398,9 @@ num-viewport = <8>; linux,pci-domain = <5>; + pinctrl-names = "default"; + pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>; + clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>, <&bpmp TEGRA194_CLK_PEX1_CORE_5M>; clock-names = "core", "core_m"; diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 21ddba9188b2..7c4dc5d85f53 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -66,8 +66,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long size, pgprot_t prot); -#define HAVE_ARCH_PCI_RESOURCE_TO_USER - /* This part of code was originally in xilinx-pci.h */ #ifdef CONFIG_PCI_XILINX extern void __init xilinx_pci_init(void); diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 436099883022..6f48649201c5 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -108,7 +108,6 @@ extern unsigned long PCIBIOS_MIN_MEM; #define HAVE_PCI_MMAP #define ARCH_GENERIC_PCI_MMAP_RESOURCE -#define HAVE_ARCH_PCI_RESOURCE_TO_USER /* * Dynamic DMA mapping stuff. 
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 2372d35533ad..327567b8f7d6 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -112,8 +112,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long size, pgprot_t prot); -#define HAVE_ARCH_PCI_RESOURCE_TO_USER - extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose); extern void pcibios_setup_bus_devices(struct pci_bus *bus); extern void pcibios_setup_bus_self(struct pci_bus *bus); diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index cfec79bb1831..4deddf430e5d 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h @@ -38,8 +38,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) #define arch_can_pci_mmap_io() 1 #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA #define get_pci_unmapped_area get_fb_unmapped_area - -#define HAVE_ARCH_PCI_RESOURCE_TO_USER #endif /* CONFIG_SPARC64 */ #if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI) diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 314a187ed572..d1e666ef3fcc 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -15,7 +15,6 @@ #include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/pci-acpi.h> -#include <linux/pci-aspm.h> #include <linux/dmar.h> #include <linux/acpi.h> #include <linux/slab.h> diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c index 02c15952b103..18b0c392bc93 100644 --- a/drivers/char/xillybus/xillybus_pcie.c +++ b/drivers/char/xillybus/xillybus_pcie.c @@ -9,7 +9,6 @@ #include <linux/module.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/slab.h> #include "xillybus.h" diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index dce06108c8c3..5337393d4dfe 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -583,8 +583,10 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, break; } - /* P2PDMA contexts do not need to be unmapped */ - if (!is_pci_p2pdma_page(sg_page(sg))) + if (is_pci_p2pdma_page(sg_page(sg))) + pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg, + sg_cnt, dir); + else ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir); } EXPORT_SYMBOL(rdma_rw_ctx_destroy); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 34cd67951aec..6c51b1bad8c4 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -13,7 +13,6 @@ #include <linux/io.h> #include <linux/netdevice.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/crc32.h> #include <linux/if_vlan.h> #include <linux/timecounter.h> diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 6d52cf5ce20e..25aa400e2e3c 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -14,7 +14,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0ef01db1f8b8..74f81fe03810 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -28,7 +28,6 @@ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/prefetch.h> -#include <linux/pci-aspm.h> 
#include <linux/ipv6.h> #include <net/ip6_checksum.h> diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index c6156cc38940..d5ee32ce9eb3 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -18,7 +18,6 @@ #include <linux/nl80211.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/etherdevice.h> #include <linux/module.h> #include "../ath.h" diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index b82da75a9ae3..4fbcc7fba3cc 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -18,7 +18,6 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index fa2c02881939..ffb705b18fb1 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -18,7 +18,6 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 5ab87a8dc907..f8a1f985a1d8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -62,7 +62,6 @@ * *****************************************************************************/ #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/interrupt.h> #include <linux/debugfs.h> #include <linux/sched.h> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6b4d7b064b38..c0808f9eb8ab 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -549,8 +549,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) WARN_ON_ONCE(!iod->nents); - /* P2PDMA requests do not need to be unmapped */ - if (!is_pci_p2pdma_page(sg_page(iod->sg))) + if (is_pci_p2pdma_page(sg_page(iod->sg))) + pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, + rq_dma_dir(req)); + else dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); @@ -834,8 +836,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, goto out; if (is_pci_p2pdma_page(sg_page(iod->sg))) - nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents, - rq_dma_dir(req)); + nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, + iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); else nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index c313de96a357..a304f5ea11b9 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -52,7 +52,7 @@ config PCI_MSI If you don't know what to do here, say Y. config PCI_MSI_IRQ_DOMAIN - def_bool ARC || ARM || ARM64 || X86 + def_bool ARC || ARM || ARM64 || X86 || RISCV depends on PCI_MSI select GENERIC_MSI_IRQ_DOMAIN @@ -170,7 +170,7 @@ config PCI_P2PDMA Many PCIe root complexes do not support P2P transactions and it's hard to tell which support it at all, so at this time, - P2P DMA transations must be between devices behind the same root + P2P DMA transactions must be between devices behind the same root port. If unsure, say N. 
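The rdma and nvme hunks earlier in this diff drop the old assumption that P2PDMA pages need no unmapping and instead pair pci_p2pdma_map_sg_attrs() with pci_p2pdma_unmap_sg(). A minimal sketch of that symmetric pattern, using hypothetical example_* wrappers (not functions from this series):

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <linux/pci-p2pdma.h>
    #include <linux/scatterlist.h>

    /* Map an SG list, taking the P2PDMA path when the pages are P2P memory */
    static int example_map_sg(struct device *dev, struct scatterlist *sg,
                              int nents, enum dma_data_direction dir)
    {
            if (is_pci_p2pdma_page(sg_page(sg)))
                    return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir,
                                                   DMA_ATTR_NO_WARN);
            return dma_map_sg_attrs(dev, sg, nents, dir, DMA_ATTR_NO_WARN);
    }

    /* The unmap must mirror the map decision, as the hunks above now do */
    static void example_unmap_sg(struct device *dev, struct scatterlist *sg,
                                 int nents, enum dma_data_direction dir)
    {
            if (is_pci_p2pdma_page(sg_page(sg)))
                    pci_p2pdma_unmap_sg(dev, sg, nents, dir);
            else
                    dma_unmap_sg(dev, sg, nents, dir);
    }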
@@ -181,7 +181,7 @@ config PCI_LABEL config PCI_HYPERV tristate "Hyper-V PCI Frontend" - depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS select PCI_HYPERV_INTERFACE help The PCI device frontend driver allows the kernel to import arbitrary diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 544922f097c0..2fccb5762c76 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -336,15 +336,6 @@ static inline int pcie_cap_version(const struct pci_dev *dev) return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } -static bool pcie_downstream_port(const struct pci_dev *dev) -{ - int type = pci_pcie_type(dev); - - return type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_DOWNSTREAM || - type == PCI_EXP_TYPE_PCIE_BRIDGE; -} - bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 495059d923f7..8e40b3e6da77 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -417,11 +417,9 @@ struct pci_bus *pci_bus_get(struct pci_bus *bus) get_device(&bus->dev); return bus; } -EXPORT_SYMBOL(pci_bus_get); void pci_bus_put(struct pci_bus *bus) { if (bus) put_device(&bus->dev); } -EXPORT_SYMBOL(pci_bus_put); diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 6ea778ae4877..0ba988b5b5bc 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -131,13 +131,29 @@ config PCI_KEYSTONE_EP DesignWare core functions to implement the driver. config PCI_LAYERSCAPE - bool "Freescale Layerscape PCIe controller" + bool "Freescale Layerscape PCIe controller - Host mode" depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) depends on PCI_MSI_IRQ_DOMAIN select MFD_SYSCON select PCIE_DW_HOST help - Say Y here if you want PCIe controller support on Layerscape SoCs. + Say Y here if you want to enable PCIe controller support on Layerscape + SoCs to work in Host mode. + This controller can work either as EP or RC. The RCW[HOST_AGT_PEX] + determines which PCIe controller works in EP mode and which PCIe + controller works in RC mode. + +config PCI_LAYERSCAPE_EP + bool "Freescale Layerscape PCIe controller - Endpoint mode" + depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) + depends on PCI_ENDPOINT + select PCIE_DW_EP + help + Say Y here if you want to enable PCIe controller support on Layerscape + SoCs to work in Endpoint mode. + This controller can work either as EP or RC. The RCW[HOST_AGT_PEX] + determines which PCIe controller works in EP mode and which PCIe + controller works in RC mode. config PCI_HISI depends on OF && (ARM64 || COMPILE_TEST) @@ -220,6 +236,16 @@ config PCI_MESON and therefore the driver re-uses the DesignWare core functions to implement the driver. +config PCIE_TEGRA194 + tristate "NVIDIA Tegra194 (and later) PCIe controller" + depends on ARCH_TEGRA_194_SOC || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + select PHY_TEGRA194_P2U + help + Say Y here if you want support for DesignWare core based PCIe host + controller found in NVIDIA Tegra194 SoC. + config PCIE_UNIPHIER bool "Socionext UniPhier PCIe controllers" depends on ARCH_UNIPHIER || COMPILE_TEST @@ -230,4 +256,16 @@ config PCIE_UNIPHIER Say Y here if you want PCIe controller support on UniPhier SoCs. This driver supports LD20 and PXs3 SoCs. 
+config PCIE_AL + bool "Amazon Annapurna Labs PCIe controller" + depends on OF && (ARM64 || COMPILE_TEST) + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here to enable support of the Amazon's Annapurna Labs PCIe + controller IP on Amazon SoCs. The PCIe controller uses the DesignWare + core plus Annapurna Labs proprietary hardware wrappers. This is + required only for DT-based platforms. ACPI platforms with the + Annapurna Labs PCIe controller don't need to enable this. + endmenu diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index b085dfd4fab7..69faff371f11 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -8,13 +8,15 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o -obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o +obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o obj-$(CONFIG_PCI_MESON) += pci-meson.o +obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o # The following drivers are for devices that use the generic ACPI diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index cee5f2f590e2..14a6ba4067fb 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) ep->phy = devm_of_phy_get(dev, np, NULL); if (IS_ERR(ep->phy)) { - if (PTR_ERR(ep->phy) == -EPROBE_DEFER) + if (PTR_ERR(ep->phy) != -ENODEV) return PTR_ERR(ep->phy); ep->phy = NULL; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 9b5cb5b70389..acfbd34032a8 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -57,6 +57,7 @@ enum imx6_pcie_variants { struct imx6_pcie_drvdata { enum imx6_pcie_variants variant; u32 flags; + int dbi_length; }; struct imx6_pcie { @@ -1173,8 +1174,8 @@ static int imx6_pcie_probe(struct platform_device *pdev) imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); if (IS_ERR(imx6_pcie->vpcie)) { - if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV) + return PTR_ERR(imx6_pcie->vpcie); imx6_pcie->vpcie = NULL; } @@ -1212,6 +1213,7 @@ static const struct imx6_pcie_drvdata drvdata[] = { .variant = IMX6Q, .flags = IMX6_PCIE_FLAG_IMX6_PHY | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, + .dbi_length = 0x200, }, [IMX6SX] = { .variant = IMX6SX, @@ -1254,6 +1256,37 @@ static struct platform_driver imx6_pcie_driver = { .shutdown = imx6_pcie_shutdown, }; +static void imx6_pcie_quirk(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + struct pcie_port *pp = bus->sysdata; + + /* Bus parent is the PCI bridge, its parent is this platform driver */ + if (!bus->dev.parent || !bus->dev.parent->parent) + return; + + /* Make sure we only quirk devices associated with this driver */ + if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver) + return; + + if (bus->number == pp->root_bus_nr) { + struct dw_pcie *pci = 
to_dw_pcie_from_pp(pp); + struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + + /* + * Limit config length to avoid the kernel reading beyond + * the register set and causing an abort on i.MX 6Quad + */ + if (imx6_pcie->drvdata->dbi_length) { + dev->cfg_size = imx6_pcie->drvdata->dbi_length; + dev_info(&dev->dev, "Limiting cfg_size to %d\n", + dev->cfg_size); + } + } +} +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, + PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk); + static int __init imx6_pcie_init(void) { #ifdef CONFIG_ARM diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index be61d96cc95e..ca9aa4501e7e 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -44,6 +44,7 @@ static const struct pci_epc_features ls_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, + .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4), }; static const struct pci_epc_features* diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index 3ab58f0584a8..1eeda2f6371f 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -91,3 +91,368 @@ struct pci_ecam_ops al_pcie_ops = { }; #endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */ + +#ifdef CONFIG_PCIE_AL + +#include <linux/of_pci.h> +#include "pcie-designware.h" + +#define AL_PCIE_REV_ID_2 2 +#define AL_PCIE_REV_ID_3 3 +#define AL_PCIE_REV_ID_4 4 + +#define AXI_BASE_OFFSET 0x0 + +#define DEVICE_ID_OFFSET 0x16c + +#define DEVICE_REV_ID 0x0 +#define DEVICE_REV_ID_DEV_ID_MASK GENMASK(31, 16) + +#define DEVICE_REV_ID_DEV_ID_X4 0 +#define DEVICE_REV_ID_DEV_ID_X8 2 +#define DEVICE_REV_ID_DEV_ID_X16 4 + +#define OB_CTRL_REV1_2_OFFSET 0x0040 +#define OB_CTRL_REV3_5_OFFSET 0x0030 + +#define CFG_TARGET_BUS 0x0 +#define CFG_TARGET_BUS_MASK_MASK GENMASK(7, 0) +#define CFG_TARGET_BUS_BUSNUM_MASK GENMASK(15, 8) + +#define CFG_CONTROL 0x4 +#define CFG_CONTROL_SUBBUS_MASK GENMASK(15, 8) +#define CFG_CONTROL_SEC_BUS_MASK GENMASK(23, 16) + +struct al_pcie_reg_offsets { + unsigned int ob_ctrl; +}; + +struct al_pcie_target_bus_cfg { + u8 reg_val; + u8 reg_mask; + u8 ecam_mask; +}; + +struct al_pcie { + struct dw_pcie *pci; + void __iomem *controller_base; /* base of PCIe unit (not DW core) */ + struct device *dev; + resource_size_t ecam_size; + unsigned int controller_rev_id; + struct al_pcie_reg_offsets reg_offsets; + struct al_pcie_target_bus_cfg target_bus_cfg; +}; + +#define PCIE_ECAM_DEVFN(x) (((x) & 0xff) << 12) + +#define to_al_pcie(x) dev_get_drvdata((x)->dev) + +static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset) +{ + return readl_relaxed(pcie->controller_base + offset); +} + +static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset, + u32 val) +{ + writel_relaxed(val, pcie->controller_base + offset); +} + +static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id) +{ + u32 dev_rev_id_val; + u32 dev_id_val; + + dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET + + DEVICE_ID_OFFSET + + DEVICE_REV_ID); + dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val); + + switch (dev_id_val) { + case DEVICE_REV_ID_DEV_ID_X4: + *rev_id = AL_PCIE_REV_ID_2; + break; + case DEVICE_REV_ID_DEV_ID_X8: + *rev_id = AL_PCIE_REV_ID_3; + break; + case DEVICE_REV_ID_DEV_ID_X16: + *rev_id = AL_PCIE_REV_ID_4; + break; + default: + dev_err(pcie->dev, "Unsupported 
dev_id_val (0x%x)\n", + dev_id_val); + return -EINVAL; + } + + dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val); + + return 0; +} + +static int al_pcie_reg_offsets_set(struct al_pcie *pcie) +{ + switch (pcie->controller_rev_id) { + case AL_PCIE_REV_ID_2: + pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET; + break; + case AL_PCIE_REV_ID_3: + case AL_PCIE_REV_ID_4: + pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET; + break; + default: + dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n", + pcie->controller_rev_id); + return -EINVAL; + } + + return 0; +} + +static inline void al_pcie_target_bus_set(struct al_pcie *pcie, + u8 target_bus, + u8 mask_target_bus) +{ + u32 reg; + + reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) | + FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus); + + al_pcie_controller_writel(pcie, AXI_BASE_OFFSET + + pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS, + reg); +} + +static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie, + unsigned int busnr, + unsigned int devfn) +{ + struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg; + unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask; + unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask; + struct pcie_port *pp = &pcie->pci->pp; + void __iomem *pci_base_addr; + + pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base + + (busnr_ecam << 20) + + PCIE_ECAM_DEVFN(devfn)); + + if (busnr_reg != target_bus_cfg->reg_val) { + dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n", + target_bus_cfg->reg_val, busnr_reg); + target_bus_cfg->reg_val = busnr_reg; + al_pcie_target_bus_set(pcie, + target_bus_cfg->reg_val, + target_bus_cfg->reg_mask); + } + + return pci_base_addr; +} + +static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct al_pcie *pcie = to_al_pcie(pci); + unsigned int busnr = bus->number; + void __iomem *pci_addr; + int rc; + + pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn); + + rc = dw_pcie_read(pci_addr + where, size, val); + + dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where, + (pci_addr + where), *val); + + return rc; +} + +static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, + u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct al_pcie *pcie = to_al_pcie(pci); + unsigned int busnr = bus->number; + void __iomem *pci_addr; + int rc; + + pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn); + + rc = dw_pcie_write(pci_addr + where, size, val); + + dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where, + (pci_addr + where), val); + + return rc; +} + +static void al_pcie_config_prepare(struct al_pcie *pcie) +{ + struct al_pcie_target_bus_cfg *target_bus_cfg; + struct pcie_port *pp = &pcie->pci->pp; + unsigned int ecam_bus_mask; + u32 cfg_control_offset; + u8 subordinate_bus; + u8 secondary_bus; + u32 cfg_control; + u32 reg; + + target_bus_cfg = &pcie->target_bus_cfg; + + ecam_bus_mask = (pcie->ecam_size >> 20) - 1; + if (ecam_bus_mask > 255) { + dev_warn(pcie->dev, "ECAM window size is larger than 256MB. 
Cutting off at 256\n"); + ecam_bus_mask = 255; + } + + /* This portion is taken from the transaction address */ + target_bus_cfg->ecam_mask = ecam_bus_mask; + /* This portion is taken from the cfg_target_bus reg */ + target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask; + target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask; + + al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val, + target_bus_cfg->reg_mask); + + secondary_bus = pp->busn->start + 1; + subordinate_bus = pp->busn->end; + + /* Set the valid values of secondary and subordinate buses */ + cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl + + CFG_CONTROL; + + cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset); + + reg = cfg_control & + ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK); + + reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) | + FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus); + + al_pcie_controller_writel(pcie, cfg_control_offset, reg); +} + +static int al_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct al_pcie *pcie = to_al_pcie(pci); + int rc; + + rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id); + if (rc) + return rc; + + rc = al_pcie_reg_offsets_set(pcie); + if (rc) + return rc; + + al_pcie_config_prepare(pcie); + + return 0; +} + +static const struct dw_pcie_host_ops al_pcie_host_ops = { + .rd_other_conf = al_pcie_rd_other_conf, + .wr_other_conf = al_pcie_wr_other_conf, + .host_init = al_pcie_host_init, +}; + +static int al_add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + pp->ops = &al_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { +}; + +static int al_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *controller_res; + struct resource *ecam_res; + struct resource *dbi_res; + struct al_pcie *al_pcie; + struct dw_pcie *pci; + + al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL); + if (!al_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + al_pcie->pci = pci; + al_pcie->dev = dev; + + dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res); + if (IS_ERR(pci->dbi_base)) { + dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res); + return PTR_ERR(pci->dbi_base); + } + + ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + if (!ecam_res) { + dev_err(dev, "couldn't find 'config' reg in DT\n"); + return -ENOENT; + } + al_pcie->ecam_size = resource_size(ecam_res); + + controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "controller"); + al_pcie->controller_base = devm_ioremap_resource(dev, controller_res); + if (IS_ERR(al_pcie->controller_base)) { + dev_err(dev, "couldn't remap controller base %pR\n", + controller_res); + return PTR_ERR(al_pcie->controller_base); + } + + dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n", + dbi_res, controller_res); + + platform_set_drvdata(pdev, al_pcie); + + return al_add_pcie_port(&pci->pp, pdev); +} + +static const struct of_device_id al_pcie_of_match[] = { + { .compatible = "amazon,al-alpine-v2-pcie", + }, + { .compatible = "amazon,al-alpine-v3-pcie", + }, + {}, 
+}; + +static struct platform_driver al_pcie_driver = { + .driver = { + .name = "al-pcie", + .of_match_table = al_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = al_pcie_probe, +}; +builtin_platform_driver(al_pcie_driver); + +#endif /* CONFIG_PCIE_AL*/ diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 3d55dc78d999..49596547e8c2 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -118,11 +118,10 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie) for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i); - if (IS_ERR(pcie->phy[i]) && - (PTR_ERR(pcie->phy[i]) == -EPROBE_DEFER)) - return PTR_ERR(pcie->phy[i]); - if (IS_ERR(pcie->phy[i])) { + if (PTR_ERR(pcie->phy[i]) != -ENODEV) + return PTR_ERR(pcie->phy[i]); + pcie->phy[i] = NULL; continue; } diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 2bf5a35c0570..3dd2e2697294 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -40,39 +40,6 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) __dw_pcie_ep_reset_bar(pci, bar, 0); } -static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, - u8 cap) -{ - u8 cap_id, next_cap_ptr; - u16 reg; - - if (!cap_ptr) - return 0; - - reg = dw_pcie_readw_dbi(pci, cap_ptr); - cap_id = (reg & 0x00ff); - - if (cap_id > PCI_CAP_ID_MAX) - return 0; - - if (cap_id == cap) - return cap_ptr; - - next_cap_ptr = (reg & 0xff00) >> 8; - return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); -} - -static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) -{ - u8 next_cap_ptr; - u16 reg; - - reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); - next_cap_ptr = (reg & 0x00ff); - - return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); -} - static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr) { @@ -531,6 +498,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) int ret; u32 reg; void *addr; + u8 hdr_type; unsigned int nbars; unsigned int offset; struct pci_epc *epc; @@ -595,6 +563,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) if (ep->ops->ep_init) ep->ops->ep_init(ep); + hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE); + if (hdr_type != PCI_HEADER_TYPE_NORMAL) { + dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", + hdr_type); + return -EIO; + } + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); if (ret < 0) epc->max_functions = 1; @@ -612,9 +587,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); return -ENOMEM; } - ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI); + ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); - ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); + ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX); offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); if (offset) { diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index f93252d0da5b..0f36a926059a 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -323,6 +323,7 @@ int dw_pcie_host_init(struct pcie_port *pp) struct pci_bus *child; struct pci_host_bridge *bridge; 
struct resource *cfg_res; + u32 hdr_type; int ret; raw_spin_lock_init(&pci->pp.lock); @@ -464,6 +465,21 @@ int dw_pcie_host_init(struct pcie_port *pp) goto err_free_msi; } + ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type); + if (ret != PCIBIOS_SUCCESSFUL) { + dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n", + ret); + ret = pcibios_err_to_errno(ret); + goto err_free_msi; + } + if (hdr_type != PCI_HEADER_TYPE_BRIDGE) { + dev_err(pci->dev, + "PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n", + hdr_type); + ret = -EIO; + goto err_free_msi; + } + pp->root_bus_nr = pp->busn->start; bridge->dev.parent = dev; @@ -628,6 +644,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) u32 val, ctrl, num_ctrls; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + /* + * Enable DBI read-only registers for writing/updating configuration. + * Write permission gets disabled towards the end of this function. + */ + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_setup(pci); if (!pp->ops->msi_host_init) { @@ -650,12 +672,10 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); /* Setup interrupt pins */ - dw_pcie_dbi_ro_wr_en(pci); val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); val &= 0xffff00ff; val |= 0x00000100; dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); - dw_pcie_dbi_ro_wr_dis(pci); /* Setup bus numbers */ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); @@ -687,15 +707,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); - /* Enable write permission for the DBI read-only register */ - dw_pcie_dbi_ro_wr_en(pci); /* Program correct class for RC */ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); - /* Better disable write permission right after the update */ - dw_pcie_dbi_ro_wr_dis(pci); dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); + + dw_pcie_dbi_ro_wr_dis(pci); } EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 7d25102c304c..820488dfeaed 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -14,6 +14,86 @@ #include "pcie-designware.h" +/* + * These interfaces resemble the pci_find_*capability() interfaces, but these + * are for configuring host controllers, which are bridges *to* PCI devices but + * are not PCI devices themselves. 
+ */ +static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, + u8 cap) +{ + u8 cap_id, next_cap_ptr; + u16 reg; + + if (!cap_ptr) + return 0; + + reg = dw_pcie_readw_dbi(pci, cap_ptr); + cap_id = (reg & 0x00ff); + + if (cap_id > PCI_CAP_ID_MAX) + return 0; + + if (cap_id == cap) + return cap_ptr; + + next_cap_ptr = (reg & 0xff00) >> 8; + return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); +} + +u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap) +{ + u8 next_cap_ptr; + u16 reg; + + reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); + next_cap_ptr = (reg & 0x00ff); + + return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_capability); + +static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start, + u8 cap) +{ + u32 header; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; + + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (start) + pos = start; + + header = dw_pcie_readl_dbi(pci, pos); + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. + */ + if (header == 0) + return 0; + + while (ttl-- > 0) { + if (PCI_EXT_CAP_ID(header) == cap && pos != start) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CFG_SPACE_SIZE) + break; + + header = dw_pcie_readl_dbi(pci, pos); + } + + return 0; +} + +u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap) +{ + return dw_pcie_find_next_ext_capability(pci, 0, cap); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability); + int dw_pcie_read(void __iomem *addr, int size, u32 *val) { if (!IS_ALIGNED((uintptr_t)addr, size)) { @@ -376,10 +456,11 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } - dev_err(pci->dev, "Phy link never came up\n"); + dev_info(pci->dev, "Phy link never came up\n"); return -ETIMEDOUT; } +EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link); int dw_pcie_link_up(struct dw_pcie *pci) { @@ -423,8 +504,10 @@ void dw_pcie_setup(struct dw_pcie *pci) ret = of_property_read_u32(np, "num-lanes", &lanes); - if (ret) - lanes = 0; + if (ret) { + dev_dbg(pci->dev, "property num-lanes isn't found\n"); + return; + } /* Set the number of lanes */ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); @@ -466,4 +549,11 @@ void dw_pcie_setup(struct dw_pcie *pci) break; } dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); + + if (of_property_read_bool(np, "snps,enable-cdm-check")) { + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | + PCIE_PL_CHK_REG_CHK_REG_START; + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); + } } diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index ffed084a0b4f..5a18e94e52c8 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -86,6 +86,15 @@ #define PCIE_MISC_CONTROL_1_OFF 0x8BC #define PCIE_DBI_RO_WR_EN BIT(0) +#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20 +#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0) +#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1) +#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR BIT(16) +#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR BIT(17) +#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE BIT(18) + +#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28 + /* * iATU Unroll-specific register definitions * From 4.80 core version the address translation will be made by unroll @@ -251,6 +260,9 @@ struct 
dw_pcie { #define to_dw_pcie_from_ep(endpoint) \ container_of((endpoint), struct dw_pcie, ep) +u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); +u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); + int dw_pcie_read(void __iomem *addr, int size, u32 *val); int dw_pcie_write(void __iomem *addr, int size, u32 val); diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 954bc2b74bbc..811b5c6d62ea 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev) hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); if (IS_ERR(hipcie->vpcie)) { - if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(hipcie->vpcie) != -ENODEV) + return PTR_ERR(hipcie->vpcie); hipcie->vpcie = NULL; } diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 8df1914226be..c19617a912bd 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -436,7 +436,7 @@ static int kirin_pcie_host_init(struct pcie_port *pp) return 0; } -static struct dw_pcie_ops kirin_dw_pcie_ops = { +static const struct dw_pcie_ops kirin_dw_pcie_ops = { .read_dbi = kirin_pcie_read_dbi, .write_dbi = kirin_pcie_write_dbi, .link_up = kirin_pcie_link_up, diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c new file mode 100644 index 000000000000..f89f5acee72d --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -0,0 +1,1732 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for Tegra194 SoC + * + * Copyright (C) 2019 NVIDIA Corporation. 
+ * + * Author: Vidya Sagar <vidyas@nvidia.com> + */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/random.h> +#include <linux/reset.h> +#include <linux/resource.h> +#include <linux/types.h> +#include "pcie-designware.h" +#include <soc/tegra/bpmp.h> +#include <soc/tegra/bpmp-abi.h> +#include "../../pci.h" + +#define APPL_PINMUX 0x0 +#define APPL_PINMUX_PEX_RST BIT(0) +#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2) +#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3) +#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4) +#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5) +#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN BIT(9) +#define APPL_PINMUX_CLKREQ_OUT_OVRD BIT(10) + +#define APPL_CTRL 0x4 +#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6) +#define APPL_CTRL_LTSSM_EN BIT(7) +#define APPL_CTRL_HW_HOT_RST_EN BIT(20) +#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0) +#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22 +#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1 + +#define APPL_INTR_EN_L0_0 0x8 +#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) +#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4) +#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8) +#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19) +#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30) +#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31) + +#define APPL_INTR_STATUS_L0 0xC +#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0) +#define APPL_INTR_STATUS_L0_INT_INT BIT(8) +#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18) + +#define APPL_INTR_EN_L1_0_0 0x1C +#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1) + +#define APPL_INTR_STATUS_L1_0_0 0x20 +#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1) + +#define APPL_INTR_STATUS_L1_1 0x2C +#define APPL_INTR_STATUS_L1_2 0x30 +#define APPL_INTR_STATUS_L1_3 0x34 +#define APPL_INTR_STATUS_L1_6 0x3C +#define APPL_INTR_STATUS_L1_7 0x40 + +#define APPL_INTR_EN_L1_8_0 0x44 +#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2) +#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3) +#define APPL_INTR_EN_L1_8_INTX_EN BIT(11) +#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15) + +#define APPL_INTR_STATUS_L1_8_0 0x4C +#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6) +#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2) +#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3) + +#define APPL_INTR_STATUS_L1_9 0x54 +#define APPL_INTR_STATUS_L1_10 0x58 +#define APPL_INTR_STATUS_L1_11 0x64 +#define APPL_INTR_STATUS_L1_13 0x74 +#define APPL_INTR_STATUS_L1_14 0x78 +#define APPL_INTR_STATUS_L1_15 0x7C +#define APPL_INTR_STATUS_L1_17 0x88 + +#define APPL_INTR_EN_L1_18 0x90 +#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2) +#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) +#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) + +#define APPL_INTR_STATUS_L1_18 0x94 +#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2) +#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) +#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) + +#define APPL_MSI_CTRL_2 0xB0 + +#define APPL_LTR_MSG_1 0xC4 +#define LTR_MSG_REQ BIT(15) +#define 
LTR_MST_NO_SNOOP_SHIFT 16 + +#define APPL_LTR_MSG_2 0xC8 +#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3) + +#define APPL_LINK_STATUS 0xCC +#define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0) + +#define APPL_DEBUG 0xD0 +#define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21) +#define APPL_DEBUG_PM_LINKST_IN_L0 0x11 +#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3) +#define APPL_DEBUG_LTSSM_STATE_SHIFT 3 +#define LTSSM_STATE_PRE_DETECT 5 + +#define APPL_RADM_STATUS 0xE4 +#define APPL_PM_XMT_TURNOFF_STATE BIT(0) + +#define APPL_DM_TYPE 0x100 +#define APPL_DM_TYPE_MASK GENMASK(3, 0) +#define APPL_DM_TYPE_RP 0x4 +#define APPL_DM_TYPE_EP 0x0 + +#define APPL_CFG_BASE_ADDR 0x104 +#define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12) + +#define APPL_CFG_IATU_DMA_BASE_ADDR 0x108 +#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18) + +#define APPL_CFG_MISC 0x110 +#define APPL_CFG_MISC_SLV_EP_MODE BIT(14) +#define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10) +#define APPL_CFG_MISC_ARCACHE_SHIFT 10 +#define APPL_CFG_MISC_ARCACHE_VAL 3 + +#define APPL_CFG_SLCG_OVERRIDE 0x114 +#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0) + +#define APPL_CAR_RESET_OVRD 0x12C +#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0) + +#define IO_BASE_IO_DECODE BIT(0) +#define IO_BASE_IO_DECODE_BIT8 BIT(8) + +#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0) +#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16) + +#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718 +#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19) + +#define EVENT_COUNTER_ALL_CLEAR 0x3 +#define EVENT_COUNTER_ENABLE_ALL 0x7 +#define EVENT_COUNTER_ENABLE_SHIFT 2 +#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0) +#define EVENT_COUNTER_EVENT_SEL_SHIFT 16 +#define EVENT_COUNTER_EVENT_Tx_L0S 0x2 +#define EVENT_COUNTER_EVENT_Rx_L0S 0x3 +#define EVENT_COUNTER_EVENT_L1 0x5 +#define EVENT_COUNTER_EVENT_L1_1 0x7 +#define EVENT_COUNTER_EVENT_L1_2 0x8 +#define EVENT_COUNTER_GROUP_SEL_SHIFT 24 +#define EVENT_COUNTER_GROUP_5 0x5 + +#define PORT_LOGIC_ACK_F_ASPM_CTRL 0x70C +#define ENTER_ASPM BIT(30) +#define L0S_ENTRANCE_LAT_SHIFT 24 +#define L0S_ENTRANCE_LAT_MASK GENMASK(26, 24) +#define L1_ENTRANCE_LAT_SHIFT 27 +#define L1_ENTRANCE_LAT_MASK GENMASK(29, 27) +#define N_FTS_SHIFT 8 +#define N_FTS_MASK GENMASK(7, 0) +#define N_FTS_VAL 52 + +#define PORT_LOGIC_GEN2_CTRL 0x80C +#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE BIT(17) +#define FTS_MASK GENMASK(7, 0) +#define FTS_VAL 52 + +#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828 + +#define GEN3_EQ_CONTROL_OFF 0x8a8 +#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8 +#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8) +#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0) + +#define GEN3_RELATED_OFF 0x890 +#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0) +#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16) +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24 +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24) + +#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0 +#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3 +#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0) +#define AMBA_ERROR_RESPONSE_CRS_OKAY 0 +#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 +#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 + +#define PORT_LOGIC_MSIX_DOORBELL 0x948 + +#define CAP_SPCIE_CAP_OFF 0x154 +#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0) +#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8) +#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8 + +#define PME_ACK_TIMEOUT 10000 + +#define LTSSM_TIMEOUT 50000 /* 50ms */ + +#define 
GEN3_GEN4_EQ_PRESET_INIT 5 + +#define GEN1_CORE_CLK_FREQ 62500000 +#define GEN2_CORE_CLK_FREQ 125000000 +#define GEN3_CORE_CLK_FREQ 250000000 +#define GEN4_CORE_CLK_FREQ 500000000 + +static const unsigned int pcie_gen_freq[] = { + GEN1_CORE_CLK_FREQ, + GEN2_CORE_CLK_FREQ, + GEN3_CORE_CLK_FREQ, + GEN4_CORE_CLK_FREQ +}; + +static const u32 event_cntr_ctrl_offset[] = { + 0x1d8, + 0x1a8, + 0x1a8, + 0x1a8, + 0x1c4, + 0x1d8 +}; + +static const u32 event_cntr_data_offset[] = { + 0x1dc, + 0x1ac, + 0x1ac, + 0x1ac, + 0x1c8, + 0x1dc +}; + +struct tegra_pcie_dw { + struct device *dev; + struct resource *appl_res; + struct resource *dbi_res; + struct resource *atu_dma_res; + void __iomem *appl_base; + struct clk *core_clk; + struct reset_control *core_apb_rst; + struct reset_control *core_rst; + struct dw_pcie pci; + struct tegra_bpmp *bpmp; + + bool supports_clkreq; + bool enable_cdm_check; + bool link_state; + bool update_fc_fixup; + u8 init_link_width; + u32 msi_ctrl_int; + u32 num_lanes; + u32 max_speed; + u32 cid; + u32 cfg_link_cap_l1sub; + u32 pcie_cap_base; + u32 aspm_cmrt; + u32 aspm_pwr_on_t; + u32 aspm_l0s_enter_lat; + + struct regulator *pex_ctl_supply; + struct regulator *slot_ctl_3v3; + struct regulator *slot_ctl_12v; + + unsigned int phy_count; + struct phy **phys; + + struct dentry *debugfs; +}; + +static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) +{ + return container_of(pci, struct tegra_pcie_dw, pci); +} + +static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value, + const u32 reg) +{ + writel_relaxed(value, pcie->appl_base + reg); +} + +static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg) +{ + return readl_relaxed(pcie->appl_base + reg); +} + +struct tegra_pcie_soc { + enum dw_pcie_device_mode mode; +}; + +static void apply_bad_link_workaround(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 current_link_width; + u16 val; + + /* + * NOTE:- Since this scenario is uncommon and link as such is not + * stable anyway, not waiting to confirm if link is really + * transitioning to Gen-2 speed + */ + val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); + if (val & PCI_EXP_LNKSTA_LBMS) { + current_link_width = (val & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + if (pcie->init_link_width > current_link_width) { + dev_warn(pci->dev, "PCIe link is bad, width reduced\n"); + val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL2); + val &= ~PCI_EXP_LNKCTL2_TLS; + val |= PCI_EXP_LNKCTL2_TLS_2_5GT; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL2, val); + + val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL); + val |= PCI_EXP_LNKCTL_RL; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL, val); + } + } +} + +static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + struct pcie_port *pp = &pci->pp; + u32 val, tmp; + u16 val_w; + + val = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); + if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { + appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0); + + /* SBR & Surprise Link Down WAR */ + val = appl_readl(pcie, APPL_CAR_RESET_OVRD); + val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; + appl_writel(pcie, val, APPL_CAR_RESET_OVRD); + udelay(1); + val = appl_readl(pcie, 
APPL_CAR_RESET_OVRD); + val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; + appl_writel(pcie, val, APPL_CAR_RESET_OVRD); + + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); + val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); + } + } + + if (val & APPL_INTR_STATUS_L0_INT_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); + if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { + appl_writel(pcie, + APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS, + APPL_INTR_STATUS_L1_8_0); + apply_bad_link_workaround(pp); + } + if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { + appl_writel(pcie, + APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS, + APPL_INTR_STATUS_L1_8_0); + + val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKSTA); + dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w & + PCI_EXP_LNKSTA_CLS); + } + } + + val = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_18); + tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { + dev_info(pci->dev, "CDM check complete\n"); + tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; + } + if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { + dev_err(pci->dev, "CDM comparison mismatch\n"); + tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; + } + if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { + dev_err(pci->dev, "CDM Logic error\n"); + tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; + } + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp); + tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); + dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp); + } + + return IRQ_HANDLED; +} + +static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg) +{ + struct tegra_pcie_dw *pcie = arg; + + return tegra_pcie_rp_irq_handler(pcie); +} + +static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* + * This is an endpoint mode specific register happen to appear even + * when controller is operating in root port mode and system hangs + * when it is accessed with link being in ASPM-L1 state. + * So skip accessing it altogether + */ + if (where == PORT_LOGIC_MSIX_DOORBELL) { + *val = 0x00000000; + return PCIBIOS_SUCCESSFUL; + } + + return dw_pcie_read(pci->dbi_base + where, size, val); +} + +static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* + * This is an endpoint mode specific register happen to appear even + * when controller is operating in root port mode and system hangs + * when it is accessed with link being in ASPM-L1 state. 
+ * So skip accessing it altogether + */ + if (where == PORT_LOGIC_MSIX_DOORBELL) + return PCIBIOS_SUCCESSFUL; + + return dw_pcie_write(pci->dbi_base + where, size, val); +} + +#if defined(CONFIG_PCIEASPM) +static void disable_aspm_l11(struct tegra_pcie_dw *pcie) +{ + u32 val; + + val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); + val &= ~PCI_L1SS_CAP_ASPM_L1_1; + dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); +} + +static void disable_aspm_l12(struct tegra_pcie_dw *pcie) +{ + u32 val; + + val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); + val &= ~PCI_L1SS_CAP_ASPM_L1_2; + dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); +} + +static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) +{ + u32 val; + + val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]); + val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT); + val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; + val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT; + val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; + dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); + val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]); + + return val; +} + +static int aspm_state_cnt(struct seq_file *s, void *data) +{ + struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *) + dev_get_drvdata(s->private); + u32 val; + + seq_printf(s, "Tx L0s entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S)); + + seq_printf(s, "Rx L0s entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S)); + + seq_printf(s, "Link L1 entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1)); + + seq_printf(s, "Link L1.1 entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1)); + + seq_printf(s, "Link L1.2 entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2)); + + /* Clear all counters */ + dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], + EVENT_COUNTER_ALL_CLEAR); + + /* Re-enable counting */ + val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; + val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; + dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); + + return 0; +} + +static void init_host_aspm(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + u32 val; + + val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); + pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP; + + /* Enable ASPM counters */ + val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; + val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; + dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val); + + /* Program T_cmrt and T_pwr_on values */ + val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); + val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE); + val |= (pcie->aspm_cmrt << 8); + val |= (pcie->aspm_pwr_on_t << 19); + dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val); + + /* Program L0s and L1 entrance latencies */ + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); + val &= ~L0S_ENTRANCE_LAT_MASK; + val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT); + val |= ENTER_ASPM; + dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); +} + +static int init_debugfs(struct tegra_pcie_dw *pcie) +{ + struct dentry *d; + + d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", + pcie->debugfs, aspm_state_cnt); + 
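+	/* debugfs is best-effort: on failure only the "aspm_state_cnt" file is lost */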
if (IS_ERR_OR_NULL(d)) + dev_err(pcie->dev, + "Failed to create debugfs file \"aspm_state_cnt\"\n"); + + return 0; +} +#else +static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } +static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } +static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } +static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; } +#endif + +static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + u16 val_w; + + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); + val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); + + if (pcie->enable_cdm_check) { + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_18); + val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR; + val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR; + appl_writel(pcie, val, APPL_INTR_EN_L1_18); + } + + val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + + PCI_EXP_LNKSTA); + pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL); + val_w |= PCI_EXP_LNKCTL_LBMIE; + dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL, + val_w); +} + +static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + + /* Enable legacy interrupt generation */ + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; + val |= APPL_INTR_EN_L0_0_INT_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_8_0); + val |= APPL_INTR_EN_L1_8_INTX_EN; + val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN; + val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN; + if (IS_ENABLED(CONFIG_PCIEAER)) + val |= APPL_INTR_EN_L1_8_AER_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L1_8_0); +} + +static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + + dw_pcie_msi_init(pp); + + /* Enable MSI interrupt generation */ + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN; + val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); +} + +static void tegra_pcie_enable_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + /* Clear interrupt statuses before enabling interrupts */ + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); + appl_writel(pcie, 0xFFFFFFFF, 
APPL_INTR_STATUS_L1_10); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); + + tegra_pcie_enable_system_interrupts(pp); + tegra_pcie_enable_legacy_interrupts(pp); + if (IS_ENABLED(CONFIG_PCI_MSI)) + tegra_pcie_enable_msi_interrupts(pp); +} + +static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + u32 val, offset, i; + + /* Program init preset */ + for (i = 0; i < pcie->num_lanes; i++) { + dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF + + (i * 2), 2, &val); + val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK; + val |= GEN3_GEN4_EQ_PRESET_INIT; + val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK; + val |= (GEN3_GEN4_EQ_PRESET_INIT << + CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT); + dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF + + (i * 2), 2, val); + + offset = dw_pcie_find_ext_capability(pci, + PCI_EXT_CAP_ID_PL_16GT) + + PCI_PL_16GT_LE_CTRL; + dw_pcie_read(pci->dbi_base + offset + i, 1, &val); + val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK; + val |= GEN3_GEN4_EQ_PRESET_INIT; + val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK; + val |= (GEN3_GEN4_EQ_PRESET_INIT << + PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT); + dw_pcie_write(pci->dbi_base + offset + i, 1, val); + } + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); + val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; + val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); + val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; + dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; + val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT); + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); + val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; + val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); + val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; + dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); +} + +static void tegra_pcie_prepare_host(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + + val = dw_pcie_readl_dbi(pci, PCI_IO_BASE); + val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8); + dw_pcie_writel_dbi(pci, PCI_IO_BASE, val); + + val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE); + val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE; + val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE; + dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val); + + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); + + /* Configure FTS */ + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); + val &= ~(N_FTS_MASK << N_FTS_SHIFT); + val |= N_FTS_VAL << N_FTS_SHIFT; + dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); + + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); + val &= ~FTS_MASK; + val |= FTS_VAL; + dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); + + /* Enable as 0xFFFF0001 response for CRS */ + val = dw_pcie_readl_dbi(pci, 
PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT); + val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT); + val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 << + AMBA_ERROR_RESPONSE_CRS_SHIFT); + dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val); + + /* Configure Max Speed from DT */ + if (pcie->max_speed && pcie->max_speed != -EINVAL) { + val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_SLS; + val |= pcie->max_speed; + dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, + val); + } + + /* Configure Max lane width from DT */ + val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_MLW; + val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT); + dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val); + + config_gen3_gen4_eq_presets(pcie); + + init_host_aspm(pcie); + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + if (pcie->update_fc_fixup) { + val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); + val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; + dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); + } + + dw_pcie_setup_rc(pp); + + clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); + + /* Assert RST */ + val = appl_readl(pcie, APPL_PINMUX); + val &= ~APPL_PINMUX_PEX_RST; + appl_writel(pcie, val, APPL_PINMUX); + + usleep_range(100, 200); + + /* Enable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + /* De-assert RST */ + val = appl_readl(pcie, APPL_PINMUX); + val |= APPL_PINMUX_PEX_RST; + appl_writel(pcie, val, APPL_PINMUX); + + msleep(100); +} + +static int tegra_pcie_dw_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val, tmp, offset, speed; + + tegra_pcie_prepare_host(pp); + + if (dw_pcie_wait_for_link(pci)) { + /* + * There are some endpoints which can't get the link up if + * root port has Data Link Feature (DLF) enabled. + * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info + * on Scaled Flow Control and DLF. + * So, need to confirm that is indeed the case here and attempt + * link up once again with DLF disabled. 
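+	 * ("DLFE" in the messages below is the DLF exchange, which is turned off
+	 * by clearing PCI_DLF_EXCHANGE_ENABLE.)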
+ */ + val = appl_readl(pcie, APPL_DEBUG); + val &= APPL_DEBUG_LTSSM_STATE_MASK; + val >>= APPL_DEBUG_LTSSM_STATE_SHIFT; + tmp = appl_readl(pcie, APPL_LINK_STATUS); + tmp &= APPL_LINK_STATUS_RDLH_LINK_UP; + if (!(val == 0x11 && !tmp)) { + /* Link is down for all good reasons */ + return 0; + } + + dev_info(pci->dev, "Link is down in DLL"); + dev_info(pci->dev, "Trying again with DLFE disabled\n"); + /* Disable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + reset_control_assert(pcie->core_rst); + reset_control_deassert(pcie->core_rst); + + offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF); + val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP); + val &= ~PCI_DLF_EXCHANGE_ENABLE; + dw_pcie_writel_dbi(pci, offset, val); + + tegra_pcie_prepare_host(pp); + + if (dw_pcie_wait_for_link(pci)) + return 0; + } + + speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & + PCI_EXP_LNKSTA_CLS; + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + tegra_pcie_enable_interrupts(pp); + + return 0; +} + +static int tegra_pcie_dw_link_up(struct dw_pcie *pci) +{ + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); + + return !!(val & PCI_EXP_LNKSTA_DLLLA); +} + +static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp) +{ + pp->num_vectors = MAX_MSI_IRQS; +} + +static const struct dw_pcie_ops tegra_dw_pcie_ops = { + .link_up = tegra_pcie_dw_link_up, +}; + +static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { + .rd_own_conf = tegra_pcie_dw_rd_own_conf, + .wr_own_conf = tegra_pcie_dw_wr_own_conf, + .host_init = tegra_pcie_dw_host_init, + .set_num_vectors = tegra_pcie_set_msi_vec_num, +}; + +static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) +{ + unsigned int phy_count = pcie->phy_count; + + while (phy_count--) { + phy_power_off(pcie->phys[phy_count]); + phy_exit(pcie->phys[phy_count]); + } +} + +static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie) +{ + unsigned int i; + int ret; + + for (i = 0; i < pcie->phy_count; i++) { + ret = phy_init(pcie->phys[i]); + if (ret < 0) + goto phy_power_off; + + ret = phy_power_on(pcie->phys[i]); + if (ret < 0) + goto phy_exit; + } + + return 0; + +phy_power_off: + while (i--) { + phy_power_off(pcie->phys[i]); +phy_exit: + phy_exit(pcie->phys[i]); + } + + return ret; +} + +static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) +{ + struct device_node *np = pcie->dev->of_node; + int ret; + + ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt); + if (ret < 0) { + dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret); + return ret; + } + + ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us", + &pcie->aspm_pwr_on_t); + if (ret < 0) + dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n", + ret); + + ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us", + &pcie->aspm_l0s_enter_lat); + if (ret < 0) + dev_info(pcie->dev, + "Failed to read ASPM L0s Entrance latency: %d\n", ret); + + ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes); + if (ret < 0) { + dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret); + return ret; + } + + pcie->max_speed = of_pci_get_max_link_speed(np); + + ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid); + if (ret) { + dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret); + return ret; + } + + ret = of_property_count_strings(np, "phy-names"); + if (ret < 0) { 
+ dev_err(pcie->dev, "Failed to find PHY entries: %d\n", + ret); + return ret; + } + pcie->phy_count = ret; + + if (of_property_read_bool(np, "nvidia,update-fc-fixup")) + pcie->update_fc_fixup = true; + + pcie->supports_clkreq = + of_property_read_bool(pcie->dev->of_node, "supports-clkreq"); + + pcie->enable_cdm_check = + of_property_read_bool(np, "snps,enable-cdm-check"); + + return 0; +} + +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, + bool enable) +{ + struct mrq_uphy_response resp; + struct tegra_bpmp_message msg; + struct mrq_uphy_request req; + + /* Controller-5 doesn't need to have its state set by BPMP-FW */ + if (pcie->cid == 5) + return 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; + req.controller_state.pcie_controller = pcie->cid; + req.controller_state.enable = enable; + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_UPHY; + msg.tx.data = &req; + msg.tx.size = sizeof(req); + msg.rx.data = &resp; + msg.rx.size = sizeof(resp); + + return tegra_bpmp_transfer(pcie->bpmp, &msg); +} + +static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) +{ + struct pcie_port *pp = &pcie->pci.pp; + struct pci_bus *child, *root_bus = NULL; + struct pci_dev *pdev; + + /* + * link doesn't go into L2 state with some of the endpoints with Tegra + * if they are not in D0 state. So, need to make sure that immediate + * downstream devices are in D0 state before sending PME_TurnOff to put + * link into L2 state. + * This is as per PCI Express Base r4.0 v1.0 September 27-2017, + * 5.2 Link State Power Management (Page #428). + */ + + list_for_each_entry(child, &pp->root_bus->children, node) { + /* Bring downstream devices to D0 if they are not already in */ + if (child->parent == pp->root_bus) { + root_bus = child; + break; + } + } + + if (!root_bus) { + dev_err(pcie->dev, "Failed to find downstream devices\n"); + return; + } + + list_for_each_entry(pdev, &root_bus->devices, bus_list) { + if (PCI_SLOT(pdev->devfn) == 0) { + if (pci_set_power_state(pdev, PCI_D0)) + dev_err(pcie->dev, + "Failed to transition %s to D0 state\n", + dev_name(&pdev->dev)); + } + } +} + +static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) +{ + pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); + if (IS_ERR(pcie->slot_ctl_3v3)) { + if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV) + return PTR_ERR(pcie->slot_ctl_3v3); + + pcie->slot_ctl_3v3 = NULL; + } + + pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v"); + if (IS_ERR(pcie->slot_ctl_12v)) { + if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV) + return PTR_ERR(pcie->slot_ctl_12v); + + pcie->slot_ctl_12v = NULL; + } + + return 0; +} + +static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie) +{ + int ret; + + if (pcie->slot_ctl_3v3) { + ret = regulator_enable(pcie->slot_ctl_3v3); + if (ret < 0) { + dev_err(pcie->dev, + "Failed to enable 3.3V slot supply: %d\n", ret); + return ret; + } + } + + if (pcie->slot_ctl_12v) { + ret = regulator_enable(pcie->slot_ctl_12v); + if (ret < 0) { + dev_err(pcie->dev, + "Failed to enable 12V slot supply: %d\n", ret); + goto fail_12v_enable; + } + } + + /* + * According to PCI Express Card Electromechanical Specification + * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive) + * should be a minimum of 100ms. 
+ */ + if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v) + msleep(100); + + return 0; + +fail_12v_enable: + if (pcie->slot_ctl_3v3) + regulator_disable(pcie->slot_ctl_3v3); + return ret; +} + +static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) +{ + if (pcie->slot_ctl_12v) + regulator_disable(pcie->slot_ctl_12v); + if (pcie->slot_ctl_3v3) + regulator_disable(pcie->slot_ctl_3v3); +} + +static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie, + bool en_hw_hot_rst) +{ + int ret; + u32 val; + + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); + if (ret) { + dev_err(pcie->dev, + "Failed to enable controller %u: %d\n", pcie->cid, ret); + return ret; + } + + ret = tegra_pcie_enable_slot_regulators(pcie); + if (ret < 0) + goto fail_slot_reg_en; + + ret = regulator_enable(pcie->pex_ctl_supply); + if (ret < 0) { + dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret); + goto fail_reg_en; + } + + ret = clk_prepare_enable(pcie->core_clk); + if (ret) { + dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret); + goto fail_core_clk; + } + + ret = reset_control_deassert(pcie->core_apb_rst); + if (ret) { + dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n", + ret); + goto fail_core_apb_rst; + } + + if (en_hw_hot_rst) { + /* Enable HW_HOT_RST mode */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + } + + ret = tegra_pcie_enable_phy(pcie); + if (ret) { + dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret); + goto fail_phy; + } + + /* Update CFG base address */ + appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, + APPL_CFG_BASE_ADDR); + + /* Configure this core for RP mode operation */ + appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE); + + appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); + + val = appl_readl(pcie, APPL_CTRL); + appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL); + + val = appl_readl(pcie, APPL_CFG_MISC); + val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); + appl_writel(pcie, val, APPL_CFG_MISC); + + if (!pcie->supports_clkreq) { + val = appl_readl(pcie, APPL_PINMUX); + val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN; + val |= APPL_PINMUX_CLKREQ_OUT_OVRD; + appl_writel(pcie, val, APPL_PINMUX); + } + + /* Update iATU_DMA base address */ + appl_writel(pcie, + pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, + APPL_CFG_IATU_DMA_BASE_ADDR); + + reset_control_deassert(pcie->core_rst); + + pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, + PCI_CAP_ID_EXP); + + /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */ + if (!pcie->supports_clkreq) { + disable_aspm_l11(pcie); + disable_aspm_l12(pcie); + } + + return ret; + +fail_phy: + reset_control_assert(pcie->core_apb_rst); +fail_core_apb_rst: + clk_disable_unprepare(pcie->core_clk); +fail_core_clk: + regulator_disable(pcie->pex_ctl_supply); +fail_reg_en: + tegra_pcie_disable_slot_regulators(pcie); +fail_slot_reg_en: + tegra_pcie_bpmp_set_ctrl_state(pcie, false); + + return ret; +} + +static int __deinit_controller(struct tegra_pcie_dw *pcie) +{ + int ret; + + ret = reset_control_assert(pcie->core_rst); + if (ret) { + dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", + ret); + return ret; + } + + tegra_pcie_disable_phy(pcie); + + ret = reset_control_assert(pcie->core_apb_rst); + if (ret) { + dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret); + return ret; + } + + 
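+	/* The remaining teardown mirrors tegra_pcie_config_controller() in reverse */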
clk_disable_unprepare(pcie->core_clk); + + ret = regulator_disable(pcie->pex_ctl_supply); + if (ret) { + dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret); + return ret; + } + + tegra_pcie_disable_slot_regulators(pcie); + + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); + if (ret) { + dev_err(pcie->dev, "Failed to disable controller %d: %d\n", + pcie->cid, ret); + return ret; + } + + return ret; +} + +static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + struct pcie_port *pp = &pci->pp; + int ret; + + ret = tegra_pcie_config_controller(pcie, false); + if (ret < 0) + return ret; + + pp->ops = &tegra_pcie_dw_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret < 0) { + dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret); + goto fail_host_init; + } + + return 0; + +fail_host_init: + return __deinit_controller(pcie); +} + +static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) +{ + u32 val; + + if (!tegra_pcie_dw_link_up(&pcie->pci)) + return 0; + + val = appl_readl(pcie, APPL_RADM_STATUS); + val |= APPL_PM_XMT_TURNOFF_STATE; + appl_writel(pcie, val, APPL_RADM_STATUS); + + return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val, + val & APPL_DEBUG_PM_LINKST_IN_L2_LAT, + 1, PME_ACK_TIMEOUT); +} + +static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) +{ + u32 data; + int err; + + if (!tegra_pcie_dw_link_up(&pcie->pci)) { + dev_dbg(pcie->dev, "PCIe link is not up...!\n"); + return; + } + + if (tegra_pcie_try_link_l2(pcie)) { + dev_info(pcie->dev, "Link didn't transition to L2 state\n"); + /* + * TX lane clock freq will reset to Gen1 only if link is in L2 + * or detect state. + * So apply pex_rst to end point to force RP to go into detect + * state + */ + data = appl_readl(pcie, APPL_PINMUX); + data &= ~APPL_PINMUX_PEX_RST; + appl_writel(pcie, data, APPL_PINMUX); + + err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, + data, + ((data & + APPL_DEBUG_LTSSM_STATE_MASK) >> + APPL_DEBUG_LTSSM_STATE_SHIFT) == + LTSSM_STATE_PRE_DETECT, + 1, LTSSM_TIMEOUT); + if (err) { + dev_info(pcie->dev, "Link didn't go to detect state\n"); + } else { + /* Disable LTSSM after link is in detect state */ + data = appl_readl(pcie, APPL_CTRL); + data &= ~APPL_CTRL_LTSSM_EN; + appl_writel(pcie, data, APPL_CTRL); + } + } + /* + * DBI registers may not be accessible after this as PLL-E would be + * down depending on how CLKREQ is pulled by end point + */ + data = appl_readl(pcie, APPL_PINMUX); + data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE); + /* Cut REFCLK to slot */ + data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; + data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; + appl_writel(pcie, data, APPL_PINMUX); +} + +static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) +{ + tegra_pcie_downstream_dev_to_D0(pcie); + dw_pcie_host_deinit(&pcie->pci.pp); + tegra_pcie_dw_pme_turnoff(pcie); + + return __deinit_controller(pcie); +} + +static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) +{ + struct pcie_port *pp = &pcie->pci.pp; + struct device *dev = pcie->dev; + char *name; + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = of_irq_get_byname(dev->of_node, "msi"); + if (!pp->msi_irq) { + dev_err(dev, "Failed to get MSI interrupt\n"); + return -ENODEV; + } + } + + pm_runtime_enable(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", + ret); + goto fail_pm_get_sync; + } + + ret = 
pinctrl_pm_select_default_state(dev); + if (ret < 0) { + dev_err(dev, "Failed to configure sideband pins: %d\n", ret); + goto fail_pinctrl; + } + + tegra_pcie_init_controller(pcie); + + pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); + if (!pcie->link_state) { + ret = -ENOMEDIUM; + goto fail_host_init; + } + + name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); + if (!name) { + ret = -ENOMEM; + goto fail_host_init; + } + + pcie->debugfs = debugfs_create_dir(name, NULL); + if (!pcie->debugfs) + dev_err(dev, "Failed to create debugfs\n"); + else + init_debugfs(pcie); + + return ret; + +fail_host_init: + tegra_pcie_deinit_controller(pcie); +fail_pinctrl: + pm_runtime_put_sync(dev); +fail_pm_get_sync: + pm_runtime_disable(dev); + return ret; +} + +static int tegra_pcie_dw_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *atu_dma_res; + struct tegra_pcie_dw *pcie; + struct resource *dbi_res; + struct pcie_port *pp; + struct dw_pcie *pci; + struct phy **phys; + char *name; + int ret; + u32 i; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = &pcie->pci; + pci->dev = &pdev->dev; + pci->ops = &tegra_dw_pcie_ops; + pp = &pci->pp; + pcie->dev = &pdev->dev; + + ret = tegra_pcie_dw_parse_dt(pcie); + if (ret < 0) { + dev_err(dev, "Failed to parse device tree: %d\n", ret); + return ret; + } + + ret = tegra_pcie_get_slot_regulators(pcie); + if (ret < 0) { + dev_err(dev, "Failed to get slot regulators: %d\n", ret); + return ret; + } + + pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); + if (IS_ERR(pcie->pex_ctl_supply)) { + ret = PTR_ERR(pcie->pex_ctl_supply); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get regulator: %ld\n", + PTR_ERR(pcie->pex_ctl_supply)); + return ret; + } + + pcie->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(pcie->core_clk)) { + dev_err(dev, "Failed to get core clock: %ld\n", + PTR_ERR(pcie->core_clk)); + return PTR_ERR(pcie->core_clk); + } + + pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "appl"); + if (!pcie->appl_res) { + dev_err(dev, "Failed to find \"appl\" region\n"); + return -ENODEV; + } + + pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res); + if (IS_ERR(pcie->appl_base)) + return PTR_ERR(pcie->appl_base); + + pcie->core_apb_rst = devm_reset_control_get(dev, "apb"); + if (IS_ERR(pcie->core_apb_rst)) { + dev_err(dev, "Failed to get APB reset: %ld\n", + PTR_ERR(pcie->core_apb_rst)); + return PTR_ERR(pcie->core_apb_rst); + } + + phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL); + if (!phys) + return -ENOMEM; + + for (i = 0; i < pcie->phy_count; i++) { + name = kasprintf(GFP_KERNEL, "p2u-%u", i); + if (!name) { + dev_err(dev, "Failed to create P2U string\n"); + return -ENOMEM; + } + phys[i] = devm_phy_get(dev, name); + kfree(name); + if (IS_ERR(phys[i])) { + ret = PTR_ERR(phys[i]); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get PHY: %d\n", ret); + return ret; + } + } + + pcie->phys = phys; + + dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + if (!dbi_res) { + dev_err(dev, "Failed to find \"dbi\" region\n"); + return -ENODEV; + } + pcie->dbi_res = dbi_res; + + pci->dbi_base = devm_ioremap_resource(dev, dbi_res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + /* Tegra HW locates DBI2 at a fixed offset from DBI */ + pci->dbi_base2 = pci->dbi_base + 0x1000; + + atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "atu_dma"); + 
if (!atu_dma_res) { + dev_err(dev, "Failed to find \"atu_dma\" region\n"); + return -ENODEV; + } + pcie->atu_dma_res = atu_dma_res; + + pci->atu_base = devm_ioremap_resource(dev, atu_dma_res); + if (IS_ERR(pci->atu_base)) + return PTR_ERR(pci->atu_base); + + pcie->core_rst = devm_reset_control_get(dev, "core"); + if (IS_ERR(pcie->core_rst)) { + dev_err(dev, "Failed to get core reset: %ld\n", + PTR_ERR(pcie->core_rst)); + return PTR_ERR(pcie->core_rst); + } + + pp->irq = platform_get_irq_byname(pdev, "intr"); + if (!pp->irq) { + dev_err(dev, "Failed to get \"intr\" interrupt\n"); + return -ENODEV; + } + + ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler, + IRQF_SHARED, "tegra-pcie-intr", pcie); + if (ret) { + dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret); + return ret; + } + + pcie->bpmp = tegra_bpmp_get(dev); + if (IS_ERR(pcie->bpmp)) + return PTR_ERR(pcie->bpmp); + + platform_set_drvdata(pdev, pcie); + + ret = tegra_pcie_config_rp(pcie); + if (ret && ret != -ENOMEDIUM) + goto fail; + else + return 0; + +fail: + tegra_bpmp_put(pcie->bpmp); + return ret; +} + +static int tegra_pcie_dw_remove(struct platform_device *pdev) +{ + struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); + + if (!pcie->link_state) + return 0; + + debugfs_remove_recursive(pcie->debugfs); + tegra_pcie_deinit_controller(pcie); + pm_runtime_put_sync(pcie->dev); + pm_runtime_disable(pcie->dev); + tegra_bpmp_put(pcie->bpmp); + + return 0; +} + +static int tegra_pcie_dw_suspend_late(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + u32 val; + + if (!pcie->link_state) + return 0; + + /* Enable HW_HOT_RST mode */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + + return 0; +} + +static int tegra_pcie_dw_suspend_noirq(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + + if (!pcie->link_state) + return 0; + + /* Save MSI interrupt vector */ + pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci, + PORT_LOGIC_MSI_CTRL_INT_0_EN); + tegra_pcie_downstream_dev_to_D0(pcie); + tegra_pcie_dw_pme_turnoff(pcie); + + return __deinit_controller(pcie); +} + +static int tegra_pcie_dw_resume_noirq(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + int ret; + + if (!pcie->link_state) + return 0; + + ret = tegra_pcie_config_controller(pcie, true); + if (ret < 0) + return ret; + + ret = tegra_pcie_dw_host_init(&pcie->pci.pp); + if (ret < 0) { + dev_err(dev, "Failed to init host: %d\n", ret); + goto fail_host_init; + } + + /* Restore MSI interrupt vector */ + dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN, + pcie->msi_ctrl_int); + + return 0; + +fail_host_init: + return __deinit_controller(pcie); +} + +static int tegra_pcie_dw_resume_early(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + u32 val; + + if (!pcie->link_state) + return 0; + + /* Disable HW_HOT_RST mode */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT; + val &= ~APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + + return 0; +} + +static void tegra_pcie_dw_shutdown(struct platform_device *pdev) +{ + struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); + + if (!pcie->link_state) + return; + + debugfs_remove_recursive(pcie->debugfs); 
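+	/* Bring downstream devices to D0 and send PME_Turn_Off before the controller is powered down */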
+ tegra_pcie_downstream_dev_to_D0(pcie); + + disable_irq(pcie->pci.pp.irq); + if (IS_ENABLED(CONFIG_PCI_MSI)) + disable_irq(pcie->pci.pp.msi_irq); + + tegra_pcie_dw_pme_turnoff(pcie); + __deinit_controller(pcie); +} + +static const struct of_device_id tegra_pcie_dw_of_match[] = { + { + .compatible = "nvidia,tegra194-pcie", + }, + {}, +}; + +static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { + .suspend_late = tegra_pcie_dw_suspend_late, + .suspend_noirq = tegra_pcie_dw_suspend_noirq, + .resume_noirq = tegra_pcie_dw_resume_noirq, + .resume_early = tegra_pcie_dw_resume_early, +}; + +static struct platform_driver tegra_pcie_dw_driver = { + .probe = tegra_pcie_dw_probe, + .remove = tegra_pcie_dw_remove, + .shutdown = tegra_pcie_dw_shutdown, + .driver = { + .name = "tegra194-pcie", + .pm = &tegra_pcie_dw_pm_ops, + .of_match_table = tegra_pcie_dw_of_match, + }, +}; +module_platform_driver(tegra_pcie_dw_driver); + +MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); + +MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>"); +MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index c742881b5061..c8cb9c5188a4 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -43,9 +43,8 @@ static struct pci_config_window *gen_pci_init(struct device *dev, goto err_out; } - err = devm_add_action(dev, gen_pci_unmap_cfg, cfg); + err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg); if (err) { - gen_pci_unmap_cfg(cfg); goto err_out; } return cfg; diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 0ca73c851e0f..f1f300218fab 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -2809,6 +2809,48 @@ static void put_hvpcibus(struct hv_pcibus_device *hbus) complete(&hbus->remove_event); } +#define HVPCI_DOM_MAP_SIZE (64 * 1024) +static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE); + +/* + * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0 + * as invalid for passthrough PCI devices of this driver. + */ +#define HVPCI_DOM_INVALID 0 + +/** + * hv_get_dom_num() - Get a valid PCI domain number + * Check if the PCI domain number is in use, and return another number if + * it is in use. + * + * @dom: Requested domain number + * + * return: domain number on success, HVPCI_DOM_INVALID on failure + */ +static u16 hv_get_dom_num(u16 dom) +{ + unsigned int i; + + if (test_and_set_bit(dom, hvpci_dom_map) == 0) + return dom; + + for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) { + if (test_and_set_bit(i, hvpci_dom_map) == 0) + return i; + } + + return HVPCI_DOM_INVALID; +} + +/** + * hv_put_dom_num() - Mark the PCI domain number as free + * @dom: Domain number to be freed + */ +static void hv_put_dom_num(u16 dom) +{ + clear_bit(dom, hvpci_dom_map); +} + /** * hv_pci_probe() - New VMBus channel probe, for a root PCI bus * @hdev: VMBus's tracking struct for this root PCI bus @@ -2820,6 +2862,7 @@ static int hv_pci_probe(struct hv_device *hdev, const struct hv_vmbus_device_id *dev_id) { struct hv_pcibus_device *hbus; + u16 dom_req, dom; char *name; int ret; @@ -2835,19 +2878,34 @@ static int hv_pci_probe(struct hv_device *hdev, hbus->state = hv_pcibus_init; /* - * The PCI bus "domain" is what is called "segment" in ACPI and - * other specs. Pull it from the instance ID, to get something - * unique. 
Bytes 8 and 9 are what is used in Windows guests, so - * do the same thing for consistency. Note that, since this code - * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee - * that (1) the only domain in use for something that looks like - * a physical PCI bus (which is actually emulated by the - * hypervisor) is domain 0 and (2) there will be no overlap - * between domains derived from these instance IDs in the same - * VM. + * The PCI bus "domain" is what is called "segment" in ACPI and other + * specs. Pull it from the instance ID, to get something usually + * unique. In rare cases of collision, we will find out another number + * not in use. + * + * Note that, since this code only runs in a Hyper-V VM, Hyper-V + * together with this guest driver can guarantee that (1) The only + * domain used by Gen1 VMs for something that looks like a physical + * PCI bus (which is actually emulated by the hypervisor) is domain 0. + * (2) There will be no overlap between domains (after fixing possible + * collisions) in the same VM. */ - hbus->sysdata.domain = hdev->dev_instance.b[9] | - hdev->dev_instance.b[8] << 8; + dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4]; + dom = hv_get_dom_num(dom_req); + + if (dom == HVPCI_DOM_INVALID) { + dev_err(&hdev->device, + "Unable to use dom# 0x%hx or other numbers", dom_req); + ret = -EINVAL; + goto free_bus; + } + + if (dom != dom_req) + dev_info(&hdev->device, + "PCI dom# 0x%hx has collision, using 0x%hx", + dom_req, dom); + + hbus->sysdata.domain = dom; hbus->hdev = hdev; refcount_set(&hbus->remove_lock, 1); @@ -2862,7 +2920,7 @@ static int hv_pci_probe(struct hv_device *hdev, hbus->sysdata.domain); if (!hbus->wq) { ret = -ENOMEM; - goto free_bus; + goto free_dom; } ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, @@ -2946,6 +3004,8 @@ close: vmbus_close(hdev->channel); destroy_wq: destroy_workqueue(hbus->wq); +free_dom: + hv_put_dom_num(hbus->sysdata.domain); free_bus: free_page((unsigned long)hbus); return ret; @@ -3008,8 +3068,8 @@ static int hv_pci_remove(struct hv_device *hdev) /* Remove the bus from PCI's point of view. 
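+	 * (The slots are removed first, before the root bus itself goes away.)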
*/ pci_lock_rescan_remove(); pci_stop_root_bus(hbus->pci_bus); - pci_remove_root_bus(hbus->pci_bus); hv_pci_remove_slots(hbus); + pci_remove_root_bus(hbus->pci_bus); pci_unlock_rescan_remove(); hbus->state = hv_pcibus_removed; } @@ -3027,6 +3087,9 @@ static int hv_pci_remove(struct hv_device *hdev) put_hvpcibus(hbus); wait_for_completion(&hbus->remove_event); destroy_workqueue(hbus->wq); + + hv_put_dom_num(hbus->sysdata.domain); + free_page((unsigned long)hbus); return 0; } @@ -3058,6 +3121,9 @@ static void __exit exit_hv_pci_drv(void) static int __init init_hv_pci_drv(void) { + /* Set the invalid domain number's bit, so it will not be used */ + set_bit(HVPCI_DOM_INVALID, hvpci_dom_map); + /* Initialize PCI block r/w interface */ hvpci_block_ops.read_block = hv_read_config_block; hvpci_block_ops.write_block = hv_write_config_block; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 9a917b2456f6..673a1725ef38 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2237,14 +2237,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) err = of_pci_get_devfn(port); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); - return err; + goto err_node_put; } index = PCI_SLOT(err); if (index < 1 || index > soc->num_ports) { dev_err(dev, "invalid port number: %d\n", index); - return -EINVAL; + err = -EINVAL; + goto err_node_put; } index--; @@ -2253,12 +2254,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) if (err < 0) { dev_err(dev, "failed to parse # of lanes: %d\n", err); - return err; + goto err_node_put; } if (value > 16) { dev_err(dev, "invalid # of lanes: %u\n", value); - return -EINVAL; + err = -EINVAL; + goto err_node_put; } lanes |= value << (index << 3); @@ -2272,13 +2274,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) lane += value; rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); - if (!rp) - return -ENOMEM; + if (!rp) { + err = -ENOMEM; + goto err_node_put; + } err = of_address_to_resource(port, 0, &rp->regs); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); - return err; + goto err_node_put; } INIT_LIST_HEAD(&rp->list); @@ -2330,6 +2334,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) return err; return 0; + +err_node_put: + of_node_put(port); + return err; } /* diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index 5a3550b6bb29..9ee6200a66f4 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -93,12 +93,9 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); /* PHY use is optional */ - pcie->phy = devm_phy_get(dev, "pcie-phy"); - if (IS_ERR(pcie->phy)) { - if (PTR_ERR(pcie->phy) == -EPROBE_DEFER) - return -EPROBE_DEFER; - pcie->phy = NULL; - } + pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources, &iobase); diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 80601e1b939e..626a7c352dfd 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -73,6 +73,7 @@ #define PCIE_MSI_VECTOR 0x0c0 #define PCIE_CONF_VEND_ID 0x100 +#define PCIE_CONF_DEVICE_ID 0x102 #define PCIE_CONF_CLASS_ID 0x106 #define PCIE_INT_MASK 0x420 @@ -141,12 +142,16 @@ struct 
mtk_pcie_port; /** * struct mtk_pcie_soc - differentiate between host generations * @need_fix_class_id: whether this host's class ID needed to be fixed or not + * @need_fix_device_id: whether this host's device ID needed to be fixed or not + * @device_id: device ID which this host need to be fixed * @ops: pointer to configuration access functions * @startup: pointer to controller setting functions * @setup_irq: pointer to initialize IRQ functions */ struct mtk_pcie_soc { bool need_fix_class_id; + bool need_fix_device_id; + unsigned int device_id; struct pci_ops *ops; int (*startup)(struct mtk_pcie_port *port); int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); @@ -630,8 +635,6 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc) } chained_irq_exit(irqchip, desc); - - return; } static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, @@ -696,6 +699,9 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) writew(val, port->base + PCIE_CONF_CLASS_ID); } + if (soc->need_fix_device_id) + writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID); + /* 100ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, !!(val & PCIE_PORT_LINKUP_V2), 20, @@ -1216,11 +1222,21 @@ static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = { .setup_irq = mtk_pcie_setup_irq, }; +static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = { + .need_fix_class_id = true, + .need_fix_device_id = true, + .device_id = PCI_DEVICE_ID_MEDIATEK_7629, + .ops = &mtk_pcie_ops_v2, + .startup = mtk_pcie_startup_port_v2, + .setup_irq = mtk_pcie_setup_irq, +}; + static const struct of_device_id mtk_pcie_ids[] = { { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 }, + { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 }, {}, }; diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index 672e633601c7..a45a6447b01d 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -88,6 +88,7 @@ #define AMAP_CTRL_TYPE_MASK 3 #define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) +#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win) #define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) #define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) #define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) @@ -462,7 +463,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) } static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, - u64 pci_addr, u32 type, u64 size) + u64 cpu_addr, u64 pci_addr, u32 type, u64 size) { u32 value; u64 size64 = ~(size - 1); @@ -482,7 +483,10 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, csr_writel(pcie, upper_32_bits(size64), PAB_EXT_PEX_AMAP_SIZEN(win_num)); - csr_writel(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); + csr_writel(pcie, lower_32_bits(cpu_addr), + PAB_PEX_AMAP_AXI_WIN(win_num)); + csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); csr_writel(pcie, lower_32_bits(pci_addr), PAB_PEX_AMAP_PEX_WIN_L(win_num)); @@ -624,7 +628,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res)); /* memory inbound translation 
window */ - program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); + program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &pcie->resources) { diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 8d20f1793a61..ef8e677ce9d1 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); if (IS_ERR(rockchip->vpcie12v)) { - if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie12v) != -ENODEV) + return PTR_ERR(rockchip->vpcie12v); dev_info(dev, "no vpcie12v regulator found\n"); } rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); if (IS_ERR(rockchip->vpcie3v3)) { - if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV) + return PTR_ERR(rockchip->vpcie3v3); dev_info(dev, "no vpcie3v3 regulator found\n"); } rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); if (IS_ERR(rockchip->vpcie1v8)) { - if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV) + return PTR_ERR(rockchip->vpcie1v8); dev_info(dev, "no vpcie1v8 regulator found\n"); } rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); if (IS_ERR(rockchip->vpcie0v9)) { - if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV) + return PTR_ERR(rockchip->vpcie0v9); dev_info(dev, "no vpcie0v9 regulator found\n"); } diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 4575e0c6dc4b..a35d3f3996d7 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -31,6 +31,9 @@ #define PCI_REG_VMLOCK 0x70 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) +#define MB2_SHADOW_OFFSET 0x2000 +#define MB2_SHADOW_SIZE 16 + enum vmd_features { /* * Device may contain registers which hint the physical location of the @@ -94,6 +97,7 @@ struct vmd_dev { struct resource resources[3]; struct irq_domain *irq_domain; struct pci_bus *bus; + u8 busn_start; struct dma_map_ops dma_ops; struct dma_domain dma_domain; @@ -440,7 +444,8 @@ static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, unsigned int devfn, int reg, int len) { char __iomem *addr = vmd->cfgbar + - (bus->number << 20) + (devfn << 12) + reg; + ((bus->number - vmd->busn_start) << 20) + + (devfn << 12) + reg; if ((addr - vmd->cfgbar) + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR])) @@ -563,7 +568,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) unsigned long flags; LIST_HEAD(resources); resource_size_t offset[2] = {0}; - resource_size_t membar2_offset = 0x2000, busn_start = 0; + resource_size_t membar2_offset = 0x2000; struct pci_bus *child; /* @@ -576,7 +581,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) u32 vmlock; int ret; - membar2_offset = 0x2018; + membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); if (ret || vmlock == ~0) return -ENODEV; @@ -588,9 +593,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!membar2) return -ENOMEM; 
offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - - readq(membar2 + 0x2008); + readq(membar2 + MB2_SHADOW_OFFSET); offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - - readq(membar2 + 0x2010); + readq(membar2 + MB2_SHADOW_OFFSET + 8); pci_iounmap(vmd->dev, membar2); } } @@ -606,14 +611,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); if (BUS_RESTRICT_CAP(vmcap) && (BUS_RESTRICT_CFG(vmconfig) == 0x1)) - busn_start = 128; + vmd->busn_start = 128; } res = &vmd->dev->resource[VMD_CFGBAR]; vmd->resources[0] = (struct resource) { .name = "VMD CFGBAR", - .start = busn_start, - .end = busn_start + (resource_size(res) >> 20) - 1, + .start = vmd->busn_start, + .end = vmd->busn_start + (resource_size(res) >> 20) - 1, .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, }; @@ -681,8 +686,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); - vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops, - sd, &resources); + vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start, + &vmd_ops, sd, &resources); if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 603eadf3d965..d0559d2faf50 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -563,7 +563,6 @@ cleanup_slots(void) } cleanup_null: up_write(&list_rwsem); - return; } int diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 16bbb183695a..b8aacb41a83c 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -173,7 +173,6 @@ static void pci_print_IRQ_route(void) dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot); } - return; } diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index b7f4e1f099d9..68de958a9be8 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1872,8 +1872,6 @@ static void interrupt_event_handler(struct controller *ctrl) } } /* End of FOR loop */ } - - return; } @@ -1943,8 +1941,6 @@ void cpqhp_pushbutton_thread(struct timer_list *t) p_slot->state = STATIC_STATE; } - - return; } diff --git a/drivers/pci/hotplug/cpqphp_nvram.h b/drivers/pci/hotplug/cpqphp_nvram.h index 918ff8dbfe62..70e879b6a23f 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.h +++ b/drivers/pci/hotplug/cpqphp_nvram.h @@ -16,10 +16,7 @@ #ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM -static inline void compaq_nvram_init(void __iomem *rom_start) -{ - return; -} +static inline void compaq_nvram_init(void __iomem *rom_start) { } static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl) { diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c index 5e8caf7a4452..5c93aa14f0de 100644 --- a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c @@ -1941,6 +1941,7 @@ static int __init update_bridge_ranges(struct bus_node **bus) break; case PCI_HEADER_TYPE_BRIDGE: function = 0x8; + /* fall through */ case PCI_HEADER_TYPE_MULTIBRIDGE: /* We assume here that only 1 bus behind the bridge TO DO: add functionality for several: diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 
8c51a04b8083..654c972b8ea0 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -110,9 +110,9 @@ struct controller { * * @OFF_STATE: slot is powered off, no subordinate devices are enumerated * @BLINKINGON_STATE: slot will be powered on after the 5 second delay, - * green led is blinking + * Power Indicator is blinking * @BLINKINGOFF_STATE: slot will be powered off after the 5 second delay, - * green led is blinking + * Power Indicator is blinking * @POWERON_STATE: slot is currently powering on * @POWEROFF_STATE: slot is currently powering off * @ON_STATE: slot is powered on, subordinate devices have been enumerated @@ -167,12 +167,11 @@ int pciehp_power_on_slot(struct controller *ctrl); void pciehp_power_off_slot(struct controller *ctrl); void pciehp_get_power_status(struct controller *ctrl, u8 *status); -void pciehp_set_attention_status(struct controller *ctrl, u8 status); +#define INDICATOR_NOOP -1 /* Leave indicator unchanged */ +void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn); + void pciehp_get_latch_status(struct controller *ctrl, u8 *status); int pciehp_query_power_fault(struct controller *ctrl); -void pciehp_green_led_on(struct controller *ctrl); -void pciehp_green_led_off(struct controller *ctrl); -void pciehp_green_led_blink(struct controller *ctrl); bool pciehp_card_present(struct controller *ctrl); bool pciehp_card_present_or_link_active(struct controller *ctrl); int pciehp_check_link_status(struct controller *ctrl); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 6ad0d86762cb..b3122c151b80 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -95,15 +95,20 @@ static void cleanup_slot(struct controller *ctrl) } /* - * set_attention_status - Turns the Amber LED for a slot on, off or blink + * set_attention_status - Turns the Attention Indicator on, off or blinking */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; + if (status) + status <<= PCI_EXP_SLTCTL_ATTN_IND_SHIFT; + else + status = PCI_EXP_SLTCTL_ATTN_IND_OFF; + pci_config_pm_runtime_get(pdev); - pciehp_set_attention_status(ctrl, status); + pciehp_set_indicators(ctrl, INDICATOR_NOOP, status); pci_config_pm_runtime_put(pdev); return 0; } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 631ced0ab28a..21af7b16d7a4 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -30,7 +30,10 @@ static void set_slot_off(struct controller *ctrl) { - /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ + /* + * Turn off slot, turn on attention indicator, turn off power + * indicator + */ if (POWER_CTRL(ctrl)) { pciehp_power_off_slot(ctrl); @@ -42,8 +45,8 @@ static void set_slot_off(struct controller *ctrl) msleep(1000); } - pciehp_green_led_off(ctrl); - pciehp_set_attention_status(ctrl, 1); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + PCI_EXP_SLTCTL_ATTN_IND_ON); } /** @@ -65,7 +68,8 @@ static int board_added(struct controller *ctrl) return retval; } - pciehp_green_led_blink(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, + INDICATOR_NOOP); /* Check link training status */ retval = pciehp_check_link_status(ctrl); @@ -90,8 +94,8 @@ static int board_added(struct controller *ctrl) } } - pciehp_green_led_on(ctrl); - pciehp_set_attention_status(ctrl, 0); + 
pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, + PCI_EXP_SLTCTL_ATTN_IND_OFF); return 0; err_exit: @@ -100,7 +104,7 @@ err_exit: } /** - * remove_board - Turns off slot and LEDs + * remove_board - Turn off slot and Power Indicator * @ctrl: PCIe hotplug controller where board is being removed * @safe_removal: whether the board is safely removed (versus surprise removed) */ @@ -123,8 +127,8 @@ static void remove_board(struct controller *ctrl, bool safe_removal) &ctrl->pending_events); } - /* turn off Green LED */ - pciehp_green_led_off(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + INDICATOR_NOOP); } static int pciehp_enable_slot(struct controller *ctrl); @@ -171,9 +175,9 @@ void pciehp_handle_button_press(struct controller *ctrl) ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n", slot_name(ctrl)); } - /* blink green LED and turn off amber */ - pciehp_green_led_blink(ctrl); - pciehp_set_attention_status(ctrl, 0); + /* blink power indicator and turn off attention */ + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, + PCI_EXP_SLTCTL_ATTN_IND_OFF); schedule_delayed_work(&ctrl->button_work, 5 * HZ); break; case BLINKINGOFF_STATE: @@ -187,12 +191,13 @@ void pciehp_handle_button_press(struct controller *ctrl) cancel_delayed_work(&ctrl->button_work); if (ctrl->state == BLINKINGOFF_STATE) { ctrl->state = ON_STATE; - pciehp_green_led_on(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, + PCI_EXP_SLTCTL_ATTN_IND_OFF); } else { ctrl->state = OFF_STATE; - pciehp_green_led_off(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + PCI_EXP_SLTCTL_ATTN_IND_OFF); } - pciehp_set_attention_status(ctrl, 0); ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n", slot_name(ctrl)); break; @@ -310,7 +315,9 @@ static int pciehp_enable_slot(struct controller *ctrl) pm_runtime_get_sync(&ctrl->pcie->port->dev); ret = __pciehp_enable_slot(ctrl); if (ret && ATTN_BUTTN(ctrl)) - pciehp_green_led_off(ctrl); /* may be blinking */ + /* may be blinking */ + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + INDICATOR_NOOP); pm_runtime_put(&ctrl->pcie->port->dev); mutex_lock(&ctrl->state_lock); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index bd990e3371e3..1a522c1c4177 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -418,65 +418,40 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot, return 0; } -void pciehp_set_attention_status(struct controller *ctrl, u8 value) +/** + * pciehp_set_indicators() - set attention indicator, power indicator, or both + * @ctrl: PCIe hotplug controller + * @pwr: one of: + * PCI_EXP_SLTCTL_PWR_IND_ON + * PCI_EXP_SLTCTL_PWR_IND_BLINK + * PCI_EXP_SLTCTL_PWR_IND_OFF + * @attn: one of: + * PCI_EXP_SLTCTL_ATTN_IND_ON + * PCI_EXP_SLTCTL_ATTN_IND_BLINK + * PCI_EXP_SLTCTL_ATTN_IND_OFF + * + * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator + * unchanged. 
+ */ +void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn) { - u16 slot_cmd; + u16 cmd = 0, mask = 0; - if (!ATTN_LED(ctrl)) - return; - - switch (value) { - case 0: /* turn off */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF; - break; - case 1: /* turn on */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON; - break; - case 2: /* turn blink */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK; - break; - default: - return; + if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) { + cmd |= (pwr & PCI_EXP_SLTCTL_PIC); + mask |= PCI_EXP_SLTCTL_PIC; } - pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); -} -void pciehp_green_led_on(struct controller *ctrl) -{ - if (!PWR_LED(ctrl)) - return; - - pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, - PCI_EXP_SLTCTL_PIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PWR_IND_ON); -} - -void pciehp_green_led_off(struct controller *ctrl) -{ - if (!PWR_LED(ctrl)) - return; - - pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, - PCI_EXP_SLTCTL_PIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PWR_IND_OFF); -} - -void pciehp_green_led_blink(struct controller *ctrl) -{ - if (!PWR_LED(ctrl)) - return; + if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) { + cmd |= (attn & PCI_EXP_SLTCTL_AIC); + mask |= PCI_EXP_SLTCTL_AIC; + } - pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, - PCI_EXP_SLTCTL_PIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PWR_IND_BLINK); + if (cmd) { + pcie_write_cmd_nowait(ctrl, cmd, mask); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, + pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); + } } int pciehp_power_on_slot(struct controller *ctrl) @@ -638,8 +613,8 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { ctrl->power_fault_detected = 1; ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); - pciehp_set_attention_status(ctrl, 1); - pciehp_green_led_off(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + PCI_EXP_SLTCTL_ATTN_IND_ON); } /* diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 182f9e3443ee..977946e4e613 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -473,7 +473,6 @@ int __init rpadlpar_io_init(void) void rpadlpar_io_exit(void) { dlpar_sysfs_exit(); - return; } module_init(rpadlpar_io_init); diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index c3899ee1db99..18627bb21e9e 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -408,7 +408,6 @@ static void __exit cleanup_slots(void) pci_hp_deregister(&slot->hotplug_slot); dealloc_slot_struct(slot); } - return; } static int __init rpaphp_init(void) diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 525fd3f272b3..b3f972e8cfed 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -240,6 +240,173 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id) pci_dev_put(dev); } +static ssize_t sriov_totalvfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + 
return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev)); +} + +static ssize_t sriov_numvfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->num_VFs); +} + +/* + * num_vfs > 0; number of VFs to enable + * num_vfs = 0; disable all VFs + * + * Note: SRIOV spec does not allow partial VF + * disable, so it's all or none. + */ +static ssize_t sriov_numvfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + u16 num_vfs; + + ret = kstrtou16(buf, 0, &num_vfs); + if (ret < 0) + return ret; + + if (num_vfs > pci_sriov_get_totalvfs(pdev)) + return -ERANGE; + + device_lock(&pdev->dev); + + if (num_vfs == pdev->sriov->num_VFs) + goto exit; + + /* is PF driver loaded w/callback */ + if (!pdev->driver || !pdev->driver->sriov_configure) { + pci_info(pdev, "Driver does not support SRIOV configuration via sysfs\n"); + ret = -ENOENT; + goto exit; + } + + if (num_vfs == 0) { + /* disable VFs */ + ret = pdev->driver->sriov_configure(pdev, 0); + goto exit; + } + + /* enable VFs */ + if (pdev->sriov->num_VFs) { + pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n", + pdev->sriov->num_VFs, num_vfs); + ret = -EBUSY; + goto exit; + } + + ret = pdev->driver->sriov_configure(pdev, num_vfs); + if (ret < 0) + goto exit; + + if (ret != num_vfs) + pci_warn(pdev, "%d VFs requested; only %d enabled\n", + num_vfs, ret); + +exit: + device_unlock(&pdev->dev); + + if (ret < 0) + return ret; + + return count; +} + +static ssize_t sriov_offset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->offset); +} + +static ssize_t sriov_stride_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->stride); +} + +static ssize_t sriov_vf_device_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%x\n", pdev->sriov->vf_device); +} + +static ssize_t sriov_drivers_autoprobe_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe); +} + +static ssize_t sriov_drivers_autoprobe_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + bool drivers_autoprobe; + + if (kstrtobool(buf, &drivers_autoprobe) < 0) + return -EINVAL; + + pdev->sriov->drivers_autoprobe = drivers_autoprobe; + + return count; +} + +static DEVICE_ATTR_RO(sriov_totalvfs); +static DEVICE_ATTR_RW(sriov_numvfs); +static DEVICE_ATTR_RO(sriov_offset); +static DEVICE_ATTR_RO(sriov_stride); +static DEVICE_ATTR_RO(sriov_vf_device); +static DEVICE_ATTR_RW(sriov_drivers_autoprobe); + +static struct attribute *sriov_dev_attrs[] = { + &dev_attr_sriov_totalvfs.attr, + &dev_attr_sriov_numvfs.attr, + &dev_attr_sriov_offset.attr, + &dev_attr_sriov_stride.attr, + &dev_attr_sriov_vf_device.attr, + &dev_attr_sriov_drivers_autoprobe.attr, + NULL, +}; + +static umode_t sriov_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + + if (!dev_is_pf(dev)) + return 0; + + return a->mode; +} + +const struct 
attribute_group sriov_dev_attr_group = { + .attrs = sriov_dev_attrs, + .is_visible = sriov_attrs_are_visible, +}; + int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { return 0; @@ -557,8 +724,8 @@ static void sriov_restore_state(struct pci_dev *dev) ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI; pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl); - for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) - pci_update_resource(dev, i); + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) + pci_update_resource(dev, i + PCI_IOV_RESOURCES); pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz); pci_iov_set_numvfs(dev, iov->num_VFs); diff --git a/drivers/pci/of.c b/drivers/pci/of.c index bc7b27a28795..36891e7deee3 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -353,7 +353,7 @@ EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources); /** * of_irq_parse_pci - Resolve the interrupt for a PCI device * @pdev: the device whose interrupt is to be resolved - * @out_irq: structure of_irq filled by this function + * @out_irq: structure of_phandle_args filled by this function * * This function resolves the PCI interrupt for a given PCI device. If a * device-node exists for a given pci_dev, it will use normal OF tree diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 234476226529..0608aae72ccc 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -18,13 +18,32 @@ #include <linux/percpu-refcount.h> #include <linux/random.h> #include <linux/seq_buf.h> -#include <linux/iommu.h> +#include <linux/xarray.h> + +enum pci_p2pdma_map_type { + PCI_P2PDMA_MAP_UNKNOWN = 0, + PCI_P2PDMA_MAP_NOT_SUPPORTED, + PCI_P2PDMA_MAP_BUS_ADDR, + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE, +}; struct pci_p2pdma { struct gen_pool *pool; bool p2pmem_published; + struct xarray map_types; }; +struct pci_p2pdma_pagemap { + struct dev_pagemap pgmap; + struct pci_dev *provider; + u64 bus_offset; +}; + +static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap) +{ + return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap); +} + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -87,6 +106,7 @@ static void pci_p2pdma_release(void *data) gen_pool_destroy(p2pdma->pool); sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group); + xa_destroy(&p2pdma->map_types); } static int pci_p2pdma_setup(struct pci_dev *pdev) @@ -98,6 +118,8 @@ static int pci_p2pdma_setup(struct pci_dev *pdev) if (!p2p) return -ENOMEM; + xa_init(&p2p->map_types); + p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev)); if (!p2p->pool) goto out; @@ -135,6 +157,7 @@ out: int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset) { + struct pci_p2pdma_pagemap *p2p_pgmap; struct dev_pagemap *pgmap; void *addr; int error; @@ -157,14 +180,18 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, return error; } - pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL); - if (!pgmap) + p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL); + if (!p2p_pgmap) return -ENOMEM; + + pgmap = &p2p_pgmap->pgmap; pgmap->res.start = pci_resource_start(pdev, bar) + offset; pgmap->res.end = pgmap->res.start + size - 1; pgmap->res.flags = pci_resource_flags(pdev, bar); pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; - pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - + + p2p_pgmap->provider = pdev; + p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) - pci_resource_start(pdev, bar); addr = 
devm_memremap_pages(&pdev->dev, pgmap); @@ -246,19 +273,32 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev) seq_buf_printf(buf, "%s;", pci_name(pdev)); } -/* - * If we can't find a common upstream bridge take a look at the root - * complex and compare it to a whitelist of known good hardware. - */ -static bool root_complex_whitelist(struct pci_dev *dev) +static const struct pci_p2pdma_whitelist_entry { + unsigned short vendor; + unsigned short device; + enum { + REQ_SAME_HOST_BRIDGE = 1 << 0, + } flags; +} pci_p2pdma_whitelist[] = { + /* AMD ZEN */ + {PCI_VENDOR_ID_AMD, 0x1450, 0}, + + /* Intel Xeon E5/Core i7 */ + {PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE}, + /* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */ + {PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE}, + {} +}; + +static bool __host_bridge_whitelist(struct pci_host_bridge *host, + bool same_host_bridge) { - struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0)); + const struct pci_p2pdma_whitelist_entry *entry; unsigned short vendor, device; - if (iommu_present(dev->dev.bus)) - return false; - if (!root) return false; @@ -266,65 +306,49 @@ static bool root_complex_whitelist(struct pci_dev *dev) device = root->device; pci_dev_put(root); - /* AMD ZEN host bridges can do peer to peer */ - if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450) + for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) { + if (vendor != entry->vendor || device != entry->device) + continue; + if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge) + return false; + return true; + } return false; } /* - * Find the distance through the nearest common upstream bridge between - * two PCI devices. - * - * If the two devices are the same device then 0 will be returned. - * - * If there are two virtual functions of the same device behind the same - * bridge port then 2 will be returned (one step down to the PCIe switch, - * then one step back to the same device). - * - * In the case where two devices are connected to the same PCIe switch, the - * value 4 will be returned. This corresponds to the following PCI tree: - * - * -+ Root Port - * \+ Switch Upstream Port - * +-+ Switch Downstream Port - * + \- Device A - * \-+ Switch Downstream Port - * \- Device B - * - * The distance is 4 because we traverse from Device A through the downstream - * port of the switch, to the common upstream port, back up to the second - * downstream port and then to Device B. - * - * Any two devices that don't have a common upstream bridge will return -1. - * In this way devices on separate PCIe root ports will be rejected, which - * is what we want for peer-to-peer seeing each PCIe root port defines a - * separate hierarchy domain and there's no way to determine whether the root - * complex supports forwarding between them. - * - * In the case where two devices are connected to different PCIe switches, - * this function will still return a positive distance as long as both - * switches eventually have a common upstream bridge. Note this covers - * the case of using multiple PCIe switches to achieve a desired level of - * fan-out from a root port. The exact distance will be a function of the - * number of switches between Device A and Device B. - * - * If a bridge which has any ACS redirection bits set is in the path - * then this functions will return -2. 
This is so we reject any - * cases where the TLPs are forwarded up into the root complex. - * In this case, a list of all infringing bridge addresses will be - * populated in acs_list (assuming it's non-null) for printk purposes. + * If we can't find a common upstream bridge take a look at the root + * complex and compare it to a whitelist of known good hardware. */ -static int upstream_bridge_distance(struct pci_dev *provider, - struct pci_dev *client, - struct seq_buf *acs_list) +static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b) +{ + struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus); + struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus); + + if (host_a == host_b) + return __host_bridge_whitelist(host_a, true); + + if (__host_bridge_whitelist(host_a, false) && + __host_bridge_whitelist(host_b, false)) + return true; + + return false; +} + +static enum pci_p2pdma_map_type +__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client, + int *dist, bool *acs_redirects, struct seq_buf *acs_list) { struct pci_dev *a = provider, *b = client, *bb; int dist_a = 0; int dist_b = 0; int acs_cnt = 0; + if (acs_redirects) + *acs_redirects = false; + /* * Note, we don't need to take references to devices returned by * pci_upstream_bridge() seeing we hold a reference to a child @@ -353,15 +377,10 @@ static int upstream_bridge_distance(struct pci_dev *provider, dist_a++; } - /* - * Allow the connection if both devices are on a whitelisted root - * complex, but add an arbitrary large value to the distance. - */ - if (root_complex_whitelist(provider) && - root_complex_whitelist(client)) - return 0x1000 + dist_a + dist_b; + if (dist) + *dist = dist_a + dist_b; - return -1; + return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; check_b_path_acs: bb = b; @@ -378,33 +397,110 @@ check_b_path_acs: bb = pci_upstream_bridge(bb); } - if (acs_cnt) - return -2; + if (dist) + *dist = dist_a + dist_b; + + if (acs_cnt) { + if (acs_redirects) + *acs_redirects = true; + + return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; + } + + return PCI_P2PDMA_MAP_BUS_ADDR; +} + +static unsigned long map_types_idx(struct pci_dev *client) +{ + return (pci_domain_nr(client->bus) << 16) | + (client->bus->number << 8) | client->devfn; +} + +/* + * Find the distance through the nearest common upstream bridge between + * two PCI devices. + * + * If the two devices are the same device then 0 will be returned. + * + * If there are two virtual functions of the same device behind the same + * bridge port then 2 will be returned (one step down to the PCIe switch, + * then one step back to the same device). + * + * In the case where two devices are connected to the same PCIe switch, the + * value 4 will be returned. This corresponds to the following PCI tree: + * + * -+ Root Port + * \+ Switch Upstream Port + * +-+ Switch Downstream Port + * + \- Device A + * \-+ Switch Downstream Port + * \- Device B + * + * The distance is 4 because we traverse from Device A through the downstream + * port of the switch, to the common upstream port, back up to the second + * downstream port and then to Device B. + * + * Any two devices that cannot communicate using p2pdma will return + * PCI_P2PDMA_MAP_NOT_SUPPORTED. + * + * Any two devices that have a data path that goes through the host bridge + * will consult a whitelist. If the host bridges are on the whitelist, + * this function will return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE. 
+ * + * If either bridge is not on the whitelist this function returns + * PCI_P2PDMA_MAP_NOT_SUPPORTED. + * + * If a bridge which has any ACS redirection bits set is in the path, + * acs_redirects will be set to true. In this case, a list of all infringing + * bridge addresses will be populated in acs_list (assuming it's non-null) + * for printk purposes. + */ +static enum pci_p2pdma_map_type +upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client, + int *dist, bool *acs_redirects, struct seq_buf *acs_list) +{ + enum pci_p2pdma_map_type map_type; + + map_type = __upstream_bridge_distance(provider, client, dist, + acs_redirects, acs_list); + + if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) { + if (!host_bridge_whitelist(provider, client)) + map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED; + } + + if (provider->p2pdma) + xa_store(&provider->p2pdma->map_types, map_types_idx(client), + xa_mk_value(map_type), GFP_KERNEL); - return dist_a + dist_b; + return map_type; } -static int upstream_bridge_distance_warn(struct pci_dev *provider, - struct pci_dev *client) +static enum pci_p2pdma_map_type +upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client, + int *dist) { struct seq_buf acs_list; + bool acs_redirects; int ret; seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); if (!acs_list.buffer) return -ENOMEM; - ret = upstream_bridge_distance(provider, client, &acs_list); - if (ret == -2) { - pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n", + ret = upstream_bridge_distance(provider, client, dist, &acs_redirects, + &acs_list); + if (acs_redirects) { + pci_warn(client, "ACS redirect is set between the client and provider (%s)\n", pci_name(provider)); /* Drop final semicolon */ acs_list.buffer[acs_list.len-1] = 0; pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n", acs_list.buffer); + } - } else if (ret < 0) { - pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n", + if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) { + pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n", pci_name(provider)); } @@ -421,22 +517,22 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider, * @num_clients: number of clients in the array * @verbose: if true, print warnings for devices when we return -1 * - * Returns -1 if any of the clients are not compatible (behind the same - * root port as the provider), otherwise returns a positive number where - * a lower number is the preferable choice. (If there's one client - * that's the same as the provider it will return 0, which is best choice). + * Returns -1 if any of the clients are not compatible, otherwise returns a + * positive number where a lower number is the preferable choice. (If there's + * one client that's the same as the provider it will return 0, which is best + * choice). * - * For now, "compatible" means the provider and the clients are all behind - * the same PCI root port. This cuts out cases that may work but is safest - * for the user. Future work can expand this to white-list root complexes that - * can safely forward between each ports. 
+ * "compatible" means the provider and the clients are either all behind + * the same PCI root port or the host bridges connected to each of the devices + * are listed in the 'pci_p2pdma_whitelist'. */ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, int num_clients, bool verbose) { bool not_supported = false; struct pci_dev *pci_client; - int distance = 0; + int total_dist = 0; + int distance; int i, ret; if (num_clients == 0) @@ -461,26 +557,26 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, if (verbose) ret = upstream_bridge_distance_warn(provider, - pci_client); + pci_client, &distance); else ret = upstream_bridge_distance(provider, pci_client, - NULL); + &distance, NULL, NULL); pci_dev_put(pci_client); - if (ret < 0) + if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) not_supported = true; if (not_supported && !verbose) break; - distance += ret; + total_dist += distance; } if (not_supported) return -1; - return distance; + return total_dist; } EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many); @@ -706,21 +802,19 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) } EXPORT_SYMBOL_GPL(pci_p2pmem_publish); -/** - * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA - * @dev: device doing the DMA request - * @sg: scatter list to map - * @nents: elements in the scatterlist - * @dir: DMA direction - * - * Scatterlists mapped with this function should not be unmapped in any way. - * - * Returns the number of SG entries mapped or 0 on error. - */ -int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) +static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider, + struct pci_dev *client) +{ + if (!provider->p2pdma) + return PCI_P2PDMA_MAP_NOT_SUPPORTED; + + return xa_to_value(xa_load(&provider->p2pdma->map_types, + map_types_idx(client))); +} + +static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap, + struct device *dev, struct scatterlist *sg, int nents) { - struct dev_pagemap *pgmap; struct scatterlist *s; phys_addr_t paddr; int i; @@ -736,16 +830,80 @@ int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, return 0; for_each_sg(sg, s, nents, i) { - pgmap = sg_page(s)->pgmap; paddr = sg_phys(s); - s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset; + s->dma_address = paddr - p2p_pgmap->bus_offset; sg_dma_len(s) = s->length; } return nents; } -EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg); + +/** + * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA + * @dev: device doing the DMA request + * @sg: scatter list to map + * @nents: elements in the scatterlist + * @dir: DMA direction + * @attrs: DMA attributes passed to dma_map_sg() (if called) + * + * Scatterlists mapped with this function should be unmapped using + * pci_p2pdma_unmap_sg_attrs(). + * + * Returns the number of SG entries mapped or 0 on error. 
+ */ +int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_p2pdma_pagemap *p2p_pgmap = + to_p2p_pgmap(sg_page(sg)->pgmap); + struct pci_dev *client; + + if (WARN_ON_ONCE(!dev_is_pci(dev))) + return 0; + + client = to_pci_dev(dev); + + switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) { + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + return dma_map_sg_attrs(dev, sg, nents, dir, attrs); + case PCI_P2PDMA_MAP_BUS_ADDR: + return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents); + default: + WARN_ON_ONCE(1); + return 0; + } +} +EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs); + +/** + * pci_p2pdma_unmap_sg - unmap a PCI peer-to-peer scatterlist that was + * mapped with pci_p2pdma_map_sg() + * @dev: device doing the DMA request + * @sg: scatter list to map + * @nents: number of elements returned by pci_p2pdma_map_sg() + * @dir: DMA direction + * @attrs: DMA attributes passed to dma_unmap_sg() (if called) + */ +void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_p2pdma_pagemap *p2p_pgmap = + to_p2p_pgmap(sg_page(sg)->pgmap); + enum pci_p2pdma_map_type map_type; + struct pci_dev *client; + + if (WARN_ON_ONCE(!dev_is_pci(dev))) + return; + + client = to_pci_dev(dev); + + map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client); + + if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) + dma_unmap_sg_attrs(dev, sg, nents, dir, attrs); +} +EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs); /** * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 45049f558860..0c02d500158f 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -14,7 +14,6 @@ #include <linux/msi.h> #include <linux/pci_hotplug.h> #include <linux/module.h> -#include <linux/pci-aspm.h> #include <linux/pci-acpi.h> #include <linux/pm_runtime.h> #include <linux/pm_qos.h> @@ -118,8 +117,58 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) return (phys_addr_t)mcfg_addr; } +/* _HPX PCI Setting Record (Type 0); same as _HPP */ +struct hpx_type0 { + u32 revision; /* Not present in _HPP */ + u8 cache_line_size; /* Not applicable to PCIe */ + u8 latency_timer; /* Not applicable to PCIe */ + u8 enable_serr; + u8 enable_perr; +}; + +static struct hpx_type0 pci_default_type0 = { + .revision = 1, + .cache_line_size = 8, + .latency_timer = 0x40, + .enable_serr = 0, + .enable_perr = 0, +}; + +static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx) +{ + u16 pci_cmd, pci_bctl; + + if (!hpx) + hpx = &pci_default_type0; + + if (hpx->revision > 1) { + pci_warn(dev, "PCI settings rev %d not supported; using defaults\n", + hpx->revision); + hpx = &pci_default_type0; + } + + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer); + pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); + if (hpx->enable_serr) + pci_cmd |= PCI_COMMAND_SERR; + if (hpx->enable_perr) + pci_cmd |= PCI_COMMAND_PARITY; + pci_write_config_word(dev, PCI_COMMAND, pci_cmd); + + /* Program bridge control value */ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, + hpx->latency_timer); + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); + if (hpx->enable_perr) + pci_bctl |= PCI_BRIDGE_CTL_PARITY; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 
pci_bctl); + } +} + static acpi_status decode_type0_hpx_record(union acpi_object *record, - struct hpp_type0 *hpx0) + struct hpx_type0 *hpx0) { int i; union acpi_object *fields = record->package.elements; @@ -146,8 +195,30 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record, return AE_OK; } +/* _HPX PCI-X Setting Record (Type 1) */ +struct hpx_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx) +{ + int pos; + + if (!hpx) + return; + + pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); + if (!pos) + return; + + pci_warn(dev, "PCI-X settings not supported\n"); +} + static acpi_status decode_type1_hpx_record(union acpi_object *record, - struct hpp_type1 *hpx1) + struct hpx_type1 *hpx1) { int i; union acpi_object *fields = record->package.elements; @@ -173,8 +244,130 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record, return AE_OK; } +static bool pcie_root_rcb_set(struct pci_dev *dev) +{ + struct pci_dev *rp = pcie_find_root_port(dev); + u16 lnkctl; + + if (!rp) + return false; + + pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_RCB) + return true; + + return false; +} + +/* _HPX PCI Express Setting Record (Type 2) */ +struct hpx_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 sec_unc_err_mask_or; +}; + +static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx) +{ + int pos; + u32 reg32; + + if (!hpx) + return; + + if (!pci_is_pcie(dev)) + return; + + if (hpx->revision > 1) { + pci_warn(dev, "PCIe settings rev %d not supported\n", + hpx->revision); + return; + } + + /* + * Don't allow _HPX to change MPS or MRRS settings. We manage + * those to make sure they're consistent with the rest of the + * platform. + */ + hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | + PCI_EXP_DEVCTL_READRQ; + hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | + PCI_EXP_DEVCTL_READRQ); + + /* Initialize Device Control Register */ + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or); + + /* Initialize Link Control Register */ + if (pcie_cap_has_lnkctl(dev)) { + + /* + * If the Root Port supports Read Completion Boundary of + * 128, set RCB to 128. Otherwise, clear it. 
+ */ + hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; + hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; + if (pcie_root_rcb_set(dev)) + hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; + + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, + ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or); + } + + /* Find Advanced Error Reporting Enhanced Capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + + /* Initialize Uncorrectable Error Mask Register */ + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); + reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or; + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); + + /* Initialize Uncorrectable Error Severity Register */ + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); + reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or; + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); + + /* Initialize Correctable Error Mask Register */ + pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); + reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or; + pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); + + /* Initialize Advanced Error Capabilities and Control Register */ + pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); + reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or; + + /* Don't enable ECRC generation or checking if unsupported */ + if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) + reg32 &= ~PCI_ERR_CAP_ECRC_GENE; + if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) + reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; + pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); + + /* + * FIXME: The following two registers are not supported yet. + * + * o Secondary Uncorrectable Error Severity Register + * o Secondary Uncorrectable Error Mask Register + */ +} + static acpi_status decode_type2_hpx_record(union acpi_object *record, - struct hpp_type2 *hpx2) + struct hpx_type2 *hpx2) { int i; union acpi_object *fields = record->package.elements; @@ -213,6 +406,164 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record, return AE_OK; } +/* _HPX PCI Express Setting Record (Type 3) */ +struct hpx_type3 { + u16 device_type; + u16 function_type; + u16 config_space_location; + u16 pci_exp_cap_id; + u16 pci_exp_cap_ver; + u16 pci_exp_vendor_id; + u16 dvsec_id; + u16 dvsec_rev; + u16 match_offset; + u32 match_mask_and; + u32 match_value; + u16 reg_offset; + u32 reg_mask_and; + u32 reg_mask_or; +}; + +enum hpx_type3_dev_type { + HPX_TYPE_ENDPOINT = BIT(0), + HPX_TYPE_LEG_END = BIT(1), + HPX_TYPE_RC_END = BIT(2), + HPX_TYPE_RC_EC = BIT(3), + HPX_TYPE_ROOT_PORT = BIT(4), + HPX_TYPE_UPSTREAM = BIT(5), + HPX_TYPE_DOWNSTREAM = BIT(6), + HPX_TYPE_PCI_BRIDGE = BIT(7), + HPX_TYPE_PCIE_BRIDGE = BIT(8), +}; + +static u16 hpx3_device_type(struct pci_dev *dev) +{ + u16 pcie_type = pci_pcie_type(dev); + const int pcie_to_hpx3_type[] = { + [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT, + [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END, + [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END, + [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC, + [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT, + [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM, + [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM, + [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE, + [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE, + }; + + if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type)) + return 0; + + return pcie_to_hpx3_type[pcie_type]; +} + +enum hpx_type3_fn_type { + HPX_FN_NORMAL = BIT(0), + HPX_FN_SRIOV_PHYS = BIT(1), + 
HPX_FN_SRIOV_VIRT = BIT(2), +}; + +static u8 hpx3_function_type(struct pci_dev *dev) +{ + if (dev->is_virtfn) + return HPX_FN_SRIOV_VIRT; + else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0) + return HPX_FN_SRIOV_PHYS; + else + return HPX_FN_NORMAL; +} + +static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id) +{ + u8 cap_ver = hpx3_cap_id & 0xf; + + if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id) + return true; + else if (cap_ver == pcie_cap_id) + return true; + + return false; +} + +enum hpx_type3_cfg_loc { + HPX_CFG_PCICFG = 0, + HPX_CFG_PCIE_CAP = 1, + HPX_CFG_PCIE_CAP_EXT = 2, + HPX_CFG_VEND_CAP = 3, + HPX_CFG_DVSEC = 4, + HPX_CFG_MAX, +}; + +static void program_hpx_type3_register(struct pci_dev *dev, + const struct hpx_type3 *reg) +{ + u32 match_reg, write_reg, header, orig_value; + u16 pos; + + if (!(hpx3_device_type(dev) & reg->device_type)) + return; + + if (!(hpx3_function_type(dev) & reg->function_type)) + return; + + switch (reg->config_space_location) { + case HPX_CFG_PCICFG: + pos = 0; + break; + case HPX_CFG_PCIE_CAP: + pos = pci_find_capability(dev, reg->pci_exp_cap_id); + if (pos == 0) + return; + + break; + case HPX_CFG_PCIE_CAP_EXT: + pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id); + if (pos == 0) + return; + + pci_read_config_dword(dev, pos, &header); + if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header), + reg->pci_exp_cap_ver)) + return; + + break; + case HPX_CFG_VEND_CAP: /* Fall through */ + case HPX_CFG_DVSEC: /* Fall through */ + default: + pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location"); + return; + } + + pci_read_config_dword(dev, pos + reg->match_offset, &match_reg); + + if ((match_reg & reg->match_mask_and) != reg->match_value) + return; + + pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg); + orig_value = write_reg; + write_reg &= reg->reg_mask_and; + write_reg |= reg->reg_mask_or; + + if (orig_value == write_reg) + return; + + pci_write_config_dword(dev, pos + reg->reg_offset, write_reg); + + pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x", + pos, orig_value, write_reg); +} + +static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx) +{ + if (!hpx) + return; + + if (!pci_is_pcie(dev)) + return; + + program_hpx_type3_register(dev, hpx); +} + static void parse_hpx3_register(struct hpx_type3 *hpx3_reg, union acpi_object *reg_fields) { @@ -233,8 +584,7 @@ static void parse_hpx3_register(struct hpx_type3 *hpx3_reg, } static acpi_status program_type3_hpx_record(struct pci_dev *dev, - union acpi_object *record, - const struct hotplug_program_ops *hp_ops) + union acpi_object *record) { union acpi_object *fields = record->package.elements; u32 desc_count, expected_length, revision; @@ -258,7 +608,7 @@ static acpi_status program_type3_hpx_record(struct pci_dev *dev, for (i = 0; i < desc_count; i++) { reg_fields = fields + 3 + i * 14; parse_hpx3_register(&hpx3, reg_fields); - hp_ops->program_type3(dev, &hpx3); + program_hpx_type3(dev, &hpx3); } break; @@ -271,15 +621,14 @@ static acpi_status program_type3_hpx_record(struct pci_dev *dev, return AE_OK; } -static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle, - const struct hotplug_program_ops *hp_ops) +static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle) { acpi_status status; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *package, *record, *fields; - struct hpp_type0 hpx0; - struct hpp_type1 hpx1; - struct hpp_type2 hpx2; + struct hpx_type0 hpx0; + 
struct hpx_type1 hpx1; + struct hpx_type2 hpx2; u32 type; int i; @@ -314,24 +663,24 @@ static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle, status = decode_type0_hpx_record(record, &hpx0); if (ACPI_FAILURE(status)) goto exit; - hp_ops->program_type0(dev, &hpx0); + program_hpx_type0(dev, &hpx0); break; case 1: memset(&hpx1, 0, sizeof(hpx1)); status = decode_type1_hpx_record(record, &hpx1); if (ACPI_FAILURE(status)) goto exit; - hp_ops->program_type1(dev, &hpx1); + program_hpx_type1(dev, &hpx1); break; case 2: memset(&hpx2, 0, sizeof(hpx2)); status = decode_type2_hpx_record(record, &hpx2); if (ACPI_FAILURE(status)) goto exit; - hp_ops->program_type2(dev, &hpx2); + program_hpx_type2(dev, &hpx2); break; case 3: - status = program_type3_hpx_record(dev, record, hp_ops); + status = program_type3_hpx_record(dev, record); if (ACPI_FAILURE(status)) goto exit; break; @@ -347,16 +696,15 @@ static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle, return status; } -static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle, - const struct hotplug_program_ops *hp_ops) +static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package, *fields; - struct hpp_type0 hpp0; + struct hpx_type0 hpx0; int i; - memset(&hpp0, 0, sizeof(hpp0)); + memset(&hpx0, 0, sizeof(hpx0)); status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); if (ACPI_FAILURE(status)) @@ -377,26 +725,24 @@ static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle, } } - hpp0.revision = 1; - hpp0.cache_line_size = fields[0].integer.value; - hpp0.latency_timer = fields[1].integer.value; - hpp0.enable_serr = fields[2].integer.value; - hpp0.enable_perr = fields[3].integer.value; + hpx0.revision = 1; + hpx0.cache_line_size = fields[0].integer.value; + hpx0.latency_timer = fields[1].integer.value; + hpx0.enable_serr = fields[2].integer.value; + hpx0.enable_perr = fields[3].integer.value; - hp_ops->program_type0(dev, &hpp0); + program_hpx_type0(dev, &hpx0); exit: kfree(buffer.pointer); return status; } -/* pci_get_hp_params +/* pci_acpi_program_hp_params * * @dev - the pci_dev for which we want parameters - * @hpp - allocated by the caller */ -int pci_acpi_program_hp_params(struct pci_dev *dev, - const struct hotplug_program_ops *hp_ops) +int pci_acpi_program_hp_params(struct pci_dev *dev) { acpi_status status; acpi_handle handle, phandle; @@ -419,10 +765,10 @@ int pci_acpi_program_hp_params(struct pci_dev *dev, * this pci dev. 
*/ while (handle) { - status = acpi_run_hpx(dev, handle, hp_ops); + status = acpi_run_hpx(dev, handle); if (ACPI_SUCCESS(status)) return 0; - status = acpi_run_hpp(dev, handle, hp_ops); + status = acpi_run_hpp(dev, handle); if (ACPI_SUCCESS(status)) return 0; if (acpi_is_root_bridge(handle)) diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index 06083b86d4f4..5fd90105510d 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -38,7 +38,7 @@ struct pci_bridge_reg_behavior { u32 rsvd; }; -const static struct pci_bridge_reg_behavior pci_regs_behavior[] = { +static const struct pci_bridge_reg_behavior pci_regs_behavior[] = { [PCI_VENDOR_ID / 4] = { .ro = ~0 }, [PCI_COMMAND / 4] = { .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | @@ -173,7 +173,7 @@ const static struct pci_bridge_reg_behavior pci_regs_behavior[] = { }, }; -const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { +static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { [PCI_CAP_LIST_ID / 4] = { /* * Capability ID, Next Capability Pointer and diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 965c72104150..868e35109284 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -464,9 +464,7 @@ static ssize_t dev_rescan_store(struct device *dev, } return count; } -static struct device_attribute dev_rescan_attr = __ATTR(rescan, - (S_IWUSR|S_IWGRP), - NULL, dev_rescan_store); +static DEVICE_ATTR_WO(dev_rescan); static ssize_t remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -480,13 +478,12 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr, pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); return count; } -static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove, - (S_IWUSR|S_IWGRP), - NULL, remove_store); +static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL, + remove_store); -static ssize_t dev_bus_rescan_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t bus_rescan_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { unsigned long val; struct pci_bus *bus = to_pci_bus(dev); @@ -504,7 +501,7 @@ static ssize_t dev_bus_rescan_store(struct device *dev, } return count; } -static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store); +static DEVICE_ATTR_WO(bus_rescan); #if defined(CONFIG_PM) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, @@ -551,154 +548,6 @@ static ssize_t devspec_show(struct device *dev, static DEVICE_ATTR_RO(devspec); #endif -#ifdef CONFIG_PCI_IOV -static ssize_t sriov_totalvfs_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev)); -} - - -static ssize_t sriov_numvfs_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->num_VFs); -} - -/* - * num_vfs > 0; number of VFs to enable - * num_vfs = 0; disable all VFs - * - * Note: SRIOV spec doesn't allow partial VF - * disable, so it's all or none. 
- */ -static ssize_t sriov_numvfs_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int ret; - u16 num_vfs; - - ret = kstrtou16(buf, 0, &num_vfs); - if (ret < 0) - return ret; - - if (num_vfs > pci_sriov_get_totalvfs(pdev)) - return -ERANGE; - - device_lock(&pdev->dev); - - if (num_vfs == pdev->sriov->num_VFs) - goto exit; - - /* is PF driver loaded w/callback */ - if (!pdev->driver || !pdev->driver->sriov_configure) { - pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n"); - ret = -ENOENT; - goto exit; - } - - if (num_vfs == 0) { - /* disable VFs */ - ret = pdev->driver->sriov_configure(pdev, 0); - goto exit; - } - - /* enable VFs */ - if (pdev->sriov->num_VFs) { - pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n", - pdev->sriov->num_VFs, num_vfs); - ret = -EBUSY; - goto exit; - } - - ret = pdev->driver->sriov_configure(pdev, num_vfs); - if (ret < 0) - goto exit; - - if (ret != num_vfs) - pci_warn(pdev, "%d VFs requested; only %d enabled\n", - num_vfs, ret); - -exit: - device_unlock(&pdev->dev); - - if (ret < 0) - return ret; - - return count; -} - -static ssize_t sriov_offset_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->offset); -} - -static ssize_t sriov_stride_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->stride); -} - -static ssize_t sriov_vf_device_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%x\n", pdev->sriov->vf_device); -} - -static ssize_t sriov_drivers_autoprobe_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe); -} - -static ssize_t sriov_drivers_autoprobe_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct pci_dev *pdev = to_pci_dev(dev); - bool drivers_autoprobe; - - if (kstrtobool(buf, &drivers_autoprobe) < 0) - return -EINVAL; - - pdev->sriov->drivers_autoprobe = drivers_autoprobe; - - return count; -} - -static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs); -static struct device_attribute sriov_numvfs_attr = - __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP), - sriov_numvfs_show, sriov_numvfs_store); -static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset); -static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride); -static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device); -static struct device_attribute sriov_drivers_autoprobe_attr = - __ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP), - sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store); -#endif /* CONFIG_PCI_IOV */ - static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -792,7 +641,7 @@ static struct attribute *pcie_dev_attrs[] = { }; static struct attribute *pcibus_attrs[] = { - &dev_attr_rescan.attr, + &dev_attr_bus_rescan.attr, &dev_attr_cpuaffinity.attr, &dev_attr_cpulistaffinity.attr, NULL, @@ -820,7 +669,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr, 
!!(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)); } -static struct device_attribute vga_attr = __ATTR_RO(boot_vga); +static DEVICE_ATTR_RO(boot_vga); static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, @@ -1085,7 +934,7 @@ void pci_create_legacy_files(struct pci_bus *b) sysfs_bin_attr_init(b->legacy_io); b->legacy_io->attr.name = "legacy_io"; b->legacy_io->size = 0xffff; - b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_io->attr.mode = 0600; b->legacy_io->read = pci_read_legacy_io; b->legacy_io->write = pci_write_legacy_io; b->legacy_io->mmap = pci_mmap_legacy_io; @@ -1099,7 +948,7 @@ void pci_create_legacy_files(struct pci_bus *b) sysfs_bin_attr_init(b->legacy_mem); b->legacy_mem->attr.name = "legacy_mem"; b->legacy_mem->size = 1024*1024; - b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_mem->attr.mode = 0600; b->legacy_mem->mmap = pci_mmap_legacy_mem; pci_adjust_legacy_attr(b, pci_mmap_mem); error = device_create_bin_file(&b->dev, b->legacy_mem); @@ -1306,7 +1155,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) } } res_attr->attr.name = res_attr_name; - res_attr->attr.mode = S_IRUSR | S_IWUSR; + res_attr->attr.mode = 0600; res_attr->size = pci_resource_len(pdev, num); res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); @@ -1419,7 +1268,7 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj, static const struct bin_attribute pci_config_attr = { .attr = { .name = "config", - .mode = S_IRUGO | S_IWUSR, + .mode = 0644, }, .size = PCI_CFG_SPACE_SIZE, .read = pci_read_config, @@ -1429,7 +1278,7 @@ static const struct bin_attribute pci_config_attr = { static const struct bin_attribute pcie_config_attr = { .attr = { .name = "config", - .mode = S_IRUGO | S_IWUSR, + .mode = 0644, }, .size = PCI_CFG_SPACE_EXP_SIZE, .read = pci_read_config, @@ -1458,7 +1307,7 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr, return count; } -static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store); +static DEVICE_ATTR(reset, 0200, NULL, reset_store); static int pci_create_capabilities_sysfs(struct pci_dev *dev) { @@ -1468,7 +1317,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) pcie_aspm_create_sysfs_dev_files(dev); if (dev->reset_fn) { - retval = device_create_file(&dev->dev, &reset_attr); + retval = device_create_file(&dev->dev, &dev_attr_reset); if (retval) goto error; } @@ -1511,7 +1360,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) sysfs_bin_attr_init(attr); attr->size = rom_size; attr->attr.name = "rom"; - attr->attr.mode = S_IRUSR | S_IWUSR; + attr->attr.mode = 0600; attr->read = pci_read_rom; attr->write = pci_write_rom; retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); @@ -1553,7 +1402,7 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev) pcie_vpd_remove_sysfs_dev_files(dev); pcie_aspm_remove_sysfs_dev_files(dev); if (dev->reset_fn) { - device_remove_file(&dev->dev, &reset_attr); + device_remove_file(&dev->dev, &dev_attr_reset); dev->reset_fn = 0; } } @@ -1606,7 +1455,7 @@ static int __init pci_sysfs_init(void) late_initcall(pci_sysfs_init); static struct attribute *pci_dev_dev_attrs[] = { - &vga_attr.attr, + &dev_attr_boot_vga.attr, NULL, }; @@ -1616,7 +1465,7 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = 
to_pci_dev(dev); - if (a == &vga_attr.attr) + if (a == &dev_attr_boot_vga.attr) if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) return 0; @@ -1624,8 +1473,8 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj, } static struct attribute *pci_dev_hp_attrs[] = { - &dev_remove_attr.attr, - &dev_rescan_attr.attr, + &dev_attr_remove.attr, + &dev_attr_dev_rescan.attr, NULL, }; @@ -1697,34 +1546,6 @@ static const struct attribute_group pci_dev_hp_attr_group = { .is_visible = pci_dev_hp_attrs_are_visible, }; -#ifdef CONFIG_PCI_IOV -static struct attribute *sriov_dev_attrs[] = { - &sriov_totalvfs_attr.attr, - &sriov_numvfs_attr.attr, - &sriov_offset_attr.attr, - &sriov_stride_attr.attr, - &sriov_vf_device_attr.attr, - &sriov_drivers_autoprobe_attr.attr, - NULL, -}; - -static umode_t sriov_attrs_are_visible(struct kobject *kobj, - struct attribute *a, int n) -{ - struct device *dev = kobj_to_dev(kobj); - - if (!dev_is_pf(dev)) - return 0; - - return a->mode; -} - -static const struct attribute_group sriov_dev_attr_group = { - .attrs = sriov_dev_attrs, - .is_visible = sriov_attrs_are_visible, -}; -#endif /* CONFIG_PCI_IOV */ - static const struct attribute_group pci_dev_attr_group = { .attrs = pci_dev_dev_attrs, .is_visible = pci_dev_attrs_are_visible, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1b27b5af3d55..e7982af9a5d8 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -890,8 +890,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - if (dev->current_state != state && printk_ratelimit()) - pci_info(dev, "Refused to change power state, currently in D%d\n", + if (dev->current_state != state) + pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n", dev->current_state); /* @@ -1443,7 +1443,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev) pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX; res = pdev->resource + bar_idx; - size = order_base_2((resource_size(res) >> 20) | 1) - 1; + size = ilog2(resource_size(res)) - 20; ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); @@ -3581,7 +3581,7 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) } /* Ensure upstream ports don't block AtomicOps on egress */ - if (!bridge->has_secondary_link) { + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) { pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2); if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) @@ -5923,8 +5923,19 @@ resource_size_t __weak pcibios_default_alignment(void) return 0; } -#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE -static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; +/* + * Arches that don't want to expose struct resource to userland as-is in + * sysfs and /proc can implement their own pci_resource_to_user(). 
+ */ +void __weak pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + resource_size_t *start, resource_size_t *end) +{ + *start = rsrc->start; + *end = rsrc->end; +} + +static char *resource_alignment_param; static DEFINE_SPINLOCK(resource_alignment_lock); /** @@ -5945,7 +5956,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, spin_lock(&resource_alignment_lock); p = resource_alignment_param; - if (!*p && !align) + if (!p || !*p) goto out; if (pci_has_flag(PCI_PROBE_ONLY)) { align = 0; @@ -6109,35 +6120,41 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) } } -static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) +static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) { - if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) - count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; - spin_lock(&resource_alignment_lock); - strncpy(resource_alignment_param, buf, count); - resource_alignment_param[count] = '\0'; - spin_unlock(&resource_alignment_lock); - return count; -} + size_t count = 0; -static ssize_t pci_get_resource_alignment_param(char *buf, size_t size) -{ - size_t count; spin_lock(&resource_alignment_lock); - count = snprintf(buf, size, "%s", resource_alignment_param); + if (resource_alignment_param) + count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param); spin_unlock(&resource_alignment_lock); - return count; -} -static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) -{ - return pci_get_resource_alignment_param(buf, PAGE_SIZE); + /* + * When set by the command line, resource_alignment_param will not + * have a trailing line feed, which is ugly. So conditionally add + * it here. + */ + if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) { + buf[count - 1] = '\n'; + buf[count++] = 0; + } + + return count; } static ssize_t resource_alignment_store(struct bus_type *bus, const char *buf, size_t count) { - return pci_set_resource_alignment_param(buf, count); + char *param = kstrndup(buf, count, GFP_KERNEL); + + if (!param) + return -ENOMEM; + + spin_lock(&resource_alignment_lock); + kfree(resource_alignment_param); + resource_alignment_param = param; + spin_unlock(&resource_alignment_lock); + return count; } static BUS_ATTR_RW(resource_alignment); @@ -6266,8 +6283,7 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "cbmemsize=", 10)) { pci_cardbus_mem_size = memparse(str + 10, &str); } else if (!strncmp(str, "resource_alignment=", 19)) { - pci_set_resource_alignment_param(str + 19, - strlen(str + 19)); + resource_alignment_param = str + 19; } else if (!strncmp(str, "ecrc=", 5)) { pcie_ecrc_get_policy(str + 5); } else if (!strncmp(str, "hpiosize=", 9)) { @@ -6302,15 +6318,18 @@ static int __init pci_setup(char *str) early_param("pci", pci_setup); /* - * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point - * to data in the __initdata section which will be freed after the init - * sequence is complete. We can't allocate memory in pci_setup() because some - * architectures do not have any memory allocation service available during - * an early_param() call. So we allocate memory and copy the variable here - * before the init section is freed. + * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized + * in pci_setup(), above, to point to data in the __initdata section which + * will be freed after the init sequence is complete. 
We can't allocate memory + * in pci_setup() because some architectures do not have any memory allocation + * service available during an early_param() call. So we allocate memory and + * copy the variable here before the init section is freed. + * */ static int __init pci_realloc_setup_params(void) { + resource_alignment_param = kstrdup(resource_alignment_param, + GFP_KERNEL); disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); return 0; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d22d1b807701..3f6947ee3324 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -39,6 +39,11 @@ int pci_probe_reset_function(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev); +#define PCI_PM_D2_DELAY 200 +#define PCI_PM_D3_WAIT 10 +#define PCI_PM_D3COLD_WAIT 100 +#define PCI_PM_BUS_WAIT 50 + /** * struct pci_platform_pm_ops - Firmware PM callbacks * @@ -84,6 +89,8 @@ void pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); int pci_finish_runtime_suspend(struct pci_dev *dev); void pcie_clear_root_pme_status(struct pci_dev *dev); +bool pci_check_pme_status(struct pci_dev *dev); +void pci_pme_wakeup_bus(struct pci_bus *bus); int __pci_pme_wakeup(struct pci_dev *dev, void *ign); void pci_pme_restore(struct pci_dev *dev); bool pci_dev_need_resume(struct pci_dev *dev); @@ -118,11 +125,25 @@ static inline bool pci_power_manageable(struct pci_dev *pci_dev) return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3; } +static inline bool pcie_downstream_port(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_DOWNSTREAM || + type == PCI_EXP_TYPE_PCIE_BRIDGE; +} + int pci_vpd_init(struct pci_dev *dev); void pci_vpd_release(struct pci_dev *dev); void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev); void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev); +/* PCI Virtual Channel */ +int pci_save_vc_state(struct pci_dev *dev); +void pci_restore_vc_state(struct pci_dev *dev); +void pci_allocate_vc_save_buffers(struct pci_dev *dev); + /* PCI /proc functions */ #ifdef CONFIG_PROC_FS int pci_proc_attach_device(struct pci_dev *dev); @@ -196,6 +217,9 @@ extern const struct attribute_group *pcibus_groups[]; extern const struct device_type pci_dev_type; extern const struct attribute_group *pci_bus_groups[]; +extern unsigned long pci_hotplug_io_size; +extern unsigned long pci_hotplug_mem_size; +extern unsigned long pci_hotplug_bus_size; /** * pci_match_one_device - Tell if a PCI device structure has a matching @@ -236,6 +260,9 @@ enum pci_bar_type { pci_bar_mem64, /* A 64-bit memory BAR */ }; +struct device *pci_get_host_bridge_device(struct pci_dev *dev); +void pci_put_host_bridge_device(struct device *dev); + int pci_configure_extended_tags(struct pci_dev *dev, void *ign); bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout); @@ -256,6 +283,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx); void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); +struct pci_bus *pci_bus_get(struct pci_bus *bus); +void pci_bus_put(struct pci_bus *bus); /* PCIe link information */ #define PCIE_SPEED2STR(speed) \ @@ -279,6 +308,7 @@ u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width); void __pcie_print_link_status(struct pci_dev *dev, bool verbose); void 
pcie_report_downtraining(struct pci_dev *dev); +void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); /* Single Root I/O Virtualization */ struct pci_sriov { @@ -418,11 +448,12 @@ static inline void pci_restore_dpc_state(struct pci_dev *dev) {} #endif #ifdef CONFIG_PCI_ATS +/* Address Translation Service */ +void pci_ats_init(struct pci_dev *dev); void pci_restore_ats_state(struct pci_dev *dev); #else -static inline void pci_restore_ats_state(struct pci_dev *dev) -{ -} +static inline void pci_ats_init(struct pci_dev *d) { } +static inline void pci_restore_ats_state(struct pci_dev *dev) { } #endif /* CONFIG_PCI_ATS */ #ifdef CONFIG_PCI_IOV @@ -433,7 +464,7 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); int pci_iov_bus_range(struct pci_bus *bus); - +extern const struct attribute_group sriov_dev_attr_group; #else static inline int pci_iov_init(struct pci_dev *dev) { @@ -518,10 +549,21 @@ static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { } static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { } #endif +#ifdef CONFIG_PCIE_ECRC +void pcie_set_ecrc_checking(struct pci_dev *dev); +void pcie_ecrc_get_policy(char *str); +#else +static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } +static inline void pcie_ecrc_get_policy(char *str) { } +#endif + #ifdef CONFIG_PCIE_PTM void pci_ptm_init(struct pci_dev *dev); +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); #else static inline void pci_ptm_init(struct pci_dev *dev) { } +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } #endif struct pci_dev_reset_methods { @@ -558,6 +600,10 @@ struct device_node; int of_pci_parse_bus_range(struct device_node *node, struct resource *res); int of_get_pci_domain_nr(struct device_node *node); int of_pci_get_max_link_speed(struct device_node *node); +void pci_set_of_node(struct pci_dev *dev); +void pci_release_of_node(struct pci_dev *dev); +void pci_set_bus_of_node(struct pci_bus *bus); +void pci_release_bus_of_node(struct pci_bus *bus); #else static inline int @@ -577,6 +623,11 @@ of_pci_get_max_link_speed(struct device_node *node) { return -EINVAL; } + +static inline void pci_set_of_node(struct pci_dev *dev) { } +static inline void pci_release_of_node(struct pci_dev *dev) { } +static inline void pci_set_bus_of_node(struct pci_bus *bus) { } +static inline void pci_release_bus_of_node(struct pci_bus *bus) { } #endif /* CONFIG_OF */ #if defined(CONFIG_OF_ADDRESS) @@ -607,4 +658,13 @@ static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } #endif +#ifdef CONFIG_ACPI +int pci_acpi_program_hp_params(struct pci_dev *dev); +#else +static inline int pci_acpi_program_hp_params(struct pci_dev *dev) +{ + return -ENODEV; +} +#endif + #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 464f8f92653f..652ef23bba35 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -18,7 +18,6 @@ #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/delay.h> -#include <linux/pci-aspm.h> #include "../pci.h" #ifdef MODULE_PARAM_PREFIX @@ -913,10 +912,10 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) /* * We allocate pcie_link_state for the component on the upstream - * end of a Link, so there's nothing to do unless this 
device has a - * Link on its secondary side. + * end of a Link, so there's nothing to do unless this device is + * a downstream port. */ - if (!pdev->has_secondary_link) + if (!pcie_downstream_port(pdev)) return; /* VIA has a strange chipset, root port is under a bridge */
@@ -1070,7 +1069,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) if (!pci_is_pcie(pdev)) return 0; - if (pdev->has_secondary_link) + if (pcie_downstream_port(pdev)) parent = pdev; if (!parent || !parent->link_state) return -EINVAL;
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 773197a12568..b0e6048a9208 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c
@@ -166,7 +166,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service) driver = pcie_port_find_service(dev, service); if (driver && driver->reset_link) { status = driver->reset_link(dev); - } else if (dev->has_secondary_link) { + } else if (pcie_downstream_port(dev)) { status = default_reset_link(dev); } else { pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index dbeeb385fb9f..3d5271a7a849 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c
@@ -1426,26 +1426,38 @@ void set_pcie_port_type(struct pci_dev *pdev) pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; + parent = pci_upstream_bridge(pdev); + if (!parent) + return; + /* - * A Root Port or a PCI-to-PCIe bridge is always the upstream end - * of a Link. No PCIe component has two Links. Two Links are - * connected by a Switch that has a Port on each Link and internal - * logic to connect the two Ports. + * Some systems do not identify their upstream/downstream ports + * correctly, so detect impossible configurations here and correct + * the port type accordingly. */ type = pci_pcie_type(pdev); - if (type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_PCIE_BRIDGE) - pdev->has_secondary_link = 1; - else if (type == PCI_EXP_TYPE_UPSTREAM || - type == PCI_EXP_TYPE_DOWNSTREAM) { - parent = pci_upstream_bridge(pdev); - + if (type == PCI_EXP_TYPE_DOWNSTREAM) { /* - * Usually there's an upstream device (Root Port or Switch - * Downstream Port), but we can't assume one exists. + * If pdev claims to be a downstream port but the parent + * device is also a downstream port, assume pdev is actually + * an upstream port. */ - if (parent && !parent->has_secondary_link) - pdev->has_secondary_link = 1; + if (pcie_downstream_port(parent)) { + pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n"); + pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; + pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM; + } + } else if (type == PCI_EXP_TYPE_UPSTREAM) { + /* + * If pdev claims to be an upstream port but the parent + * device is also an upstream port, assume pdev is actually + * a downstream port.
+ */ + if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) { + pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n"); + pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; + pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM; + } } } @@ -1915,275 +1927,6 @@ static void pci_configure_mps(struct pci_dev *dev) p_mps, mps, mpss); } -static struct hpp_type0 pci_default_type0 = { - .revision = 1, - .cache_line_size = 8, - .latency_timer = 0x40, - .enable_serr = 0, - .enable_perr = 0, -}; - -static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) -{ - u16 pci_cmd, pci_bctl; - - if (!hpp) - hpp = &pci_default_type0; - - if (hpp->revision > 1) { - pci_warn(dev, "PCI settings rev %d not supported; using defaults\n", - hpp->revision); - hpp = &pci_default_type0; - } - - pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); - pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); - pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); - if (hpp->enable_serr) - pci_cmd |= PCI_COMMAND_SERR; - if (hpp->enable_perr) - pci_cmd |= PCI_COMMAND_PARITY; - pci_write_config_word(dev, PCI_COMMAND, pci_cmd); - - /* Program bridge control value */ - if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, - hpp->latency_timer); - pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); - if (hpp->enable_perr) - pci_bctl |= PCI_BRIDGE_CTL_PARITY; - pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); - } -} - -static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) -{ - int pos; - - if (!hpp) - return; - - pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); - if (!pos) - return; - - pci_warn(dev, "PCI-X settings not supported\n"); -} - -static bool pcie_root_rcb_set(struct pci_dev *dev) -{ - struct pci_dev *rp = pcie_find_root_port(dev); - u16 lnkctl; - - if (!rp) - return false; - - pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); - if (lnkctl & PCI_EXP_LNKCTL_RCB) - return true; - - return false; -} - -static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) -{ - int pos; - u32 reg32; - - if (!hpp) - return; - - if (!pci_is_pcie(dev)) - return; - - if (hpp->revision > 1) { - pci_warn(dev, "PCIe settings rev %d not supported\n", - hpp->revision); - return; - } - - /* - * Don't allow _HPX to change MPS or MRRS settings. We manage - * those to make sure they're consistent with the rest of the - * platform. - */ - hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | - PCI_EXP_DEVCTL_READRQ; - hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | - PCI_EXP_DEVCTL_READRQ); - - /* Initialize Device Control Register */ - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, - ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); - - /* Initialize Link Control Register */ - if (pcie_cap_has_lnkctl(dev)) { - - /* - * If the Root Port supports Read Completion Boundary of - * 128, set RCB to 128. Otherwise, clear it. 
- */ - hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; - hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; - if (pcie_root_rcb_set(dev)) - hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; - - pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, - ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); - } - - /* Find Advanced Error Reporting Enhanced Capability */ - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); - if (!pos) - return; - - /* Initialize Uncorrectable Error Mask Register */ - pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); - reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; - pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); - - /* Initialize Uncorrectable Error Severity Register */ - pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); - reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; - pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); - - /* Initialize Correctable Error Mask Register */ - pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); - reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; - pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); - - /* Initialize Advanced Error Capabilities and Control Register */ - pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); - reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; - - /* Don't enable ECRC generation or checking if unsupported */ - if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) - reg32 &= ~PCI_ERR_CAP_ECRC_GENE; - if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) - reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; - pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); - - /* - * FIXME: The following two registers are not supported yet. - * - * o Secondary Uncorrectable Error Severity Register - * o Secondary Uncorrectable Error Mask Register - */ -} - -static u16 hpx3_device_type(struct pci_dev *dev) -{ - u16 pcie_type = pci_pcie_type(dev); - const int pcie_to_hpx3_type[] = { - [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT, - [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END, - [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END, - [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC, - [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT, - [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM, - [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM, - [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE, - [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE, - }; - - if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type)) - return 0; - - return pcie_to_hpx3_type[pcie_type]; -} - -static u8 hpx3_function_type(struct pci_dev *dev) -{ - if (dev->is_virtfn) - return HPX_FN_SRIOV_VIRT; - else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0) - return HPX_FN_SRIOV_PHYS; - else - return HPX_FN_NORMAL; -} - -static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id) -{ - u8 cap_ver = hpx3_cap_id & 0xf; - - if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id) - return true; - else if (cap_ver == pcie_cap_id) - return true; - - return false; -} - -static void program_hpx_type3_register(struct pci_dev *dev, - const struct hpx_type3 *reg) -{ - u32 match_reg, write_reg, header, orig_value; - u16 pos; - - if (!(hpx3_device_type(dev) & reg->device_type)) - return; - - if (!(hpx3_function_type(dev) & reg->function_type)) - return; - - switch (reg->config_space_location) { - case HPX_CFG_PCICFG: - pos = 0; - break; - case HPX_CFG_PCIE_CAP: - pos = pci_find_capability(dev, reg->pci_exp_cap_id); - if (pos == 0) - return; - - break; - case HPX_CFG_PCIE_CAP_EXT: - pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id); - 
if (pos == 0) - return; - - pci_read_config_dword(dev, pos, &header); - if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header), - reg->pci_exp_cap_ver)) - return; - - break; - case HPX_CFG_VEND_CAP: /* Fall through */ - case HPX_CFG_DVSEC: /* Fall through */ - default: - pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location"); - return; - } - - pci_read_config_dword(dev, pos + reg->match_offset, &match_reg); - - if ((match_reg & reg->match_mask_and) != reg->match_value) - return; - - pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg); - orig_value = write_reg; - write_reg &= reg->reg_mask_and; - write_reg |= reg->reg_mask_or; - - if (orig_value == write_reg) - return; - - pci_write_config_dword(dev, pos + reg->reg_offset, write_reg); - - pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x", - pos, orig_value, write_reg); -} - -static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx3) -{ - if (!hpx3) - return; - - if (!pci_is_pcie(dev)) - return; - - program_hpx_type3_register(dev, hpx3); -} - int pci_configure_extended_tags(struct pci_dev *dev, void *ign) { struct pci_host_bridge *host;
@@ -2364,13 +2107,6 @@ static void pci_configure_serr(struct pci_dev *dev) static void pci_configure_device(struct pci_dev *dev) { - static const struct hotplug_program_ops hp_ops = { - .program_type0 = program_hpp_type0, - .program_type1 = program_hpp_type1, - .program_type2 = program_hpp_type2, - .program_type3 = program_hpx_type3, - }; - pci_configure_mps(dev); pci_configure_extended_tags(dev, NULL); pci_configure_relaxed_ordering(dev);
@@ -2378,7 +2114,7 @@ static void pci_configure_device(struct pci_dev *dev) pci_configure_eetlp_prefix(dev); pci_configure_serr(dev); - pci_acpi_program_hp_params(dev, &hp_ops); + pci_acpi_program_hp_params(dev); } static void pci_release_capabilities(struct pci_dev *dev)
@@ -2759,12 +2495,8 @@ static int only_one_child(struct pci_bus *bus) * A PCIe Downstream Port normally leads to a Link with only Device * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan * only for Device 0 in that situation. - * - * Checking has_secondary_link is a hack to identify Downstream - * Ports because sometimes Switches are configured such that the - * PCIe Port Type labels are backwards. */ - if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link) + if (bridge && pci_is_pcie(bridge) && pcie_downstream_port(bridge)) return 1; return 0;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 44c4ae1abd00..320255e5e8f8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c
@@ -20,7 +20,6 @@ #include <linux/delay.h> #include <linux/acpi.h> #include <linux/dmi.h> -#include <linux/pci-aspm.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/ktime.h>
@@ -2593,6 +2592,59 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, nvenet_msi_disable); /* + * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled, + * then the device can't use INTx interrupts. Tegra's PCIe root ports don't + * generate MSI interrupts for PME and AER events; instead, only INTx + * interrupts are generated. Though Tegra's PCIe root ports can generate MSI + * interrupts for other events, the PCIe specification doesn't support using + * a mix of INTx and MSI/MSI-X, so MSI interrupts must be disabled to avoid + * port service drivers registering their respective ISRs for MSIs.
+ */ +static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev) +{ + dev->no_msi = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); + +/* * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing * config register. This register controls the routing of legacy * interrupts from devices that route through the MCP55. If this register @@ -2925,6 +2977,24 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, quirk_msi_intx_disable_qca_bug); + +/* + * Amazon's Annapurna Labs 1c36:0031 Root Ports don't support MSI-X, so it + * should be disabled on platforms where the device (mistakenly) advertises it. + * + * Notice that this quirk also disables MSI (which may work, but hasn't been + * tested), since currently there is no standard way to disable only MSI-X. + * + * The 0031 device id is reused for other non Root Port device types, + * therefore the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. + */ +static void quirk_al_msi_disable(struct pci_dev *dev) +{ + dev->no_msi = 1; + pci_warn(dev, "Disabling MSI/MSI-X\n"); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, + PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable); #endif /* CONFIG_PCI_MSI */ /* @@ -4366,6 +4436,24 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) return ret; } +static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) +{ + if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) + return -ENOTTY; + + /* + * Amazon's Annapurna Labs root ports don't include an ACS capability, + * but do include ACS-like functionality. The hardware doesn't support + * peer-to-peer transactions via the root port and each has a unique + * segment number. + * + * Additionally, the root ports cannot send traffic to each other. 
+ */ + acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return acs_flags ? 0 : 1; +} + /* * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2, @@ -4466,6 +4554,19 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) return acs_flags ? 0 : 1; } +static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * iProc PAXB Root Ports don't advertise an ACS capability, but + * they do not allow peer-to-peer transactions between Root Ports. + * Allow each Root Port to be in a separate IOMMU group by masking + * SV/RR/CR/UF bits. + */ + acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return acs_flags ? 0 : 1; +} + static const struct pci_dev_acs_enabled { u16 vendor; u16 device; @@ -4559,6 +4660,9 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, + /* Amazon Annapurna Labs */ + { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, { 0 } }; diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 7f4e65872b8d..bade14002fd8 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -15,7 +15,6 @@ #include "pci.h" DECLARE_RWSEM(pci_bus_sem); -EXPORT_SYMBOL_GPL(pci_bus_sem); /* * pci_for_each_dma_alias - Iterate over DMA aliases for a device diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 79b1fa6519be..e7dbe21705ba 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1662,8 +1662,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data) int i; bool *unassigned = data; - for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) { - struct resource *r = &dev->resource[i]; + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES]; struct pci_bus_region region; /* Not assigned or rejected by kernel? */ diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c index 5acd9c02683a..5486f8768c86 100644 --- a/drivers/pci/vc.c +++ b/drivers/pci/vc.c @@ -13,6 +13,8 @@ #include <linux/pci_regs.h> #include <linux/types.h> +#include "pci.h" + /** * pci_vc_save_restore_dwords - Save or restore a series of dwords * @dev: device @@ -105,7 +107,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res) struct pci_dev *link = NULL; /* Enable VCs from the downstream device */ - if (!dev->has_secondary_link) + if (!pci_is_pcie(dev) || !pcie_downstream_port(dev)) return; ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); @@ -409,7 +411,6 @@ void pci_restore_vc_state(struct pci_dev *dev) * For each type of VC capability, VC/VC9/MFVC, find the capability, size * it, and allocate a buffer for save/restore. 
*/ - void pci_allocate_vc_save_buffers(struct pci_dev *dev) { int i; diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index 4963c2e2bd4c..7915d10f9aa1 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -571,6 +571,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); +/* + * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port + * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. + */ +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, + PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd); /* * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig index e516967d695b..f9817c3ae85f 100644 --- a/drivers/phy/tegra/Kconfig +++ b/drivers/phy/tegra/Kconfig @@ -7,3 +7,10 @@ config PHY_TEGRA_XUSB To compile this driver as a module, choose M here: the module will be called phy-tegra-xusb. + +config PHY_TEGRA194_P2U + tristate "NVIDIA Tegra194 PIPE2UPHY PHY driver" + depends on ARCH_TEGRA_194_SOC || COMPILE_TEST + select GENERIC_PHY + help + Enable this to support the P2U (PIPE to UPHY) that is part of Tegra 19x SOCs. diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile index 64ccaeacb631..320dd389f34d 100644 --- a/drivers/phy/tegra/Makefile +++ b/drivers/phy/tegra/Makefile @@ -6,3 +6,4 @@ phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_124_SOC) += xusb-tegra124.o phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o +obj-$(CONFIG_PHY_TEGRA194_P2U) += phy-tegra194-p2u.o diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c new file mode 100644 index 000000000000..7042bed9feaa --- /dev/null +++ b/drivers/phy/tegra/phy-tegra194-p2u.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * P2U (PIPE to UPHY) driver for Tegra T194 SoC + * + * Copyright (C) 2019 NVIDIA Corporation. 
+ * + * Author: Vidya Sagar <vidyas@nvidia.com> + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/phy/phy.h> + +#define P2U_PERIODIC_EQ_CTRL_GEN3 0xc0 +#define P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN BIT(0) +#define P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN BIT(1) +#define P2U_PERIODIC_EQ_CTRL_GEN4 0xc4 +#define P2U_PERIODIC_EQ_CTRL_GEN4_INIT_PRESET_EQ_TRAIN_EN BIT(1) + +#define P2U_RX_DEBOUNCE_TIME 0xa4 +#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_MASK 0xffff +#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL 160 + +struct tegra_p2u { + void __iomem *base; +}; + +static inline void p2u_writel(struct tegra_p2u *phy, const u32 value, + const u32 reg) +{ + writel_relaxed(value, phy->base + reg); +} + +static inline u32 p2u_readl(struct tegra_p2u *phy, const u32 reg) +{ + return readl_relaxed(phy->base + reg); +} + +static int tegra_p2u_power_on(struct phy *x) +{ + struct tegra_p2u *phy = phy_get_drvdata(x); + u32 val; + + val = p2u_readl(phy, P2U_PERIODIC_EQ_CTRL_GEN3); + val &= ~P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN; + val |= P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN; + p2u_writel(phy, val, P2U_PERIODIC_EQ_CTRL_GEN3); + + val = p2u_readl(phy, P2U_PERIODIC_EQ_CTRL_GEN4); + val |= P2U_PERIODIC_EQ_CTRL_GEN4_INIT_PRESET_EQ_TRAIN_EN; + p2u_writel(phy, val, P2U_PERIODIC_EQ_CTRL_GEN4); + + val = p2u_readl(phy, P2U_RX_DEBOUNCE_TIME); + val &= ~P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_MASK; + val |= P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL; + p2u_writel(phy, val, P2U_RX_DEBOUNCE_TIME); + + return 0; +} + +static const struct phy_ops ops = { + .power_on = tegra_p2u_power_on, + .owner = THIS_MODULE, +}; + +static int tegra_p2u_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct device *dev = &pdev->dev; + struct phy *generic_phy; + struct tegra_p2u *phy; + struct resource *res; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctl"); + phy->base = devm_ioremap_resource(dev, res); + if (IS_ERR(phy->base)) + return PTR_ERR(phy->base); + + platform_set_drvdata(pdev, phy); + + generic_phy = devm_phy_create(dev, NULL, &ops); + if (IS_ERR(generic_phy)) + return PTR_ERR(generic_phy); + + phy_set_drvdata(generic_phy, phy); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + + return 0; +} + +static const struct of_device_id tegra_p2u_id_table[] = { + { + .compatible = "nvidia,tegra194-p2u", + }, + {} +}; +MODULE_DEVICE_TABLE(of, tegra_p2u_id_table); + +static struct platform_driver tegra_p2u_driver = { + .probe = tegra_p2u_probe, + .driver = { + .name = "tegra194-p2u", + .of_match_table = tegra_p2u_id_table, + }, +}; +module_platform_driver(tegra_p2u_driver); + +MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>"); +MODULE_DESCRIPTION("NVIDIA Tegra194 PIPE2UPHY PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 644f7f5c61a2..4a858789e6c5 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -27,7 +27,6 @@ #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/aer.h> -#include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 1bb6aada93fa..ac39ed79ccaa 100644 --- 
a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -21,7 +21,6 @@ #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index d0c2f8d6f2a2..c8e512ba6d39 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -51,7 +51,6 @@ #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/interrupt.h> #include <linux/aer.h> #include <linux/raid_class.h> diff --git a/include/linux/memremap.h b/include/linux/memremap.h index fb2a0bd826b9..bef51e35d8d2 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -111,7 +111,6 @@ struct dev_pagemap { struct completion done; enum memory_type type; unsigned int flags; - u64 pci_p2pdma_bus_offset; const struct dev_pagemap_ops *ops; }; diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h deleted file mode 100644 index 67064145d76e..000000000000 --- a/include/linux/pci-aspm.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * aspm.h - * - * PCI Express ASPM defines and function prototypes - * - * Copyright (C) 2007 Intel Corp. - * Zhang Yanmin (yanmin.zhang@intel.com) - * Shaohua Li (shaohua.li@intel.com) - * - * For more information, please consult the following manuals (look at - * http://www.pcisig.com/ for how to get them): - * - * PCI Express Specification - */ - -#ifndef LINUX_ASPM_H -#define LINUX_ASPM_H - -#include <linux/pci.h> - -#define PCIE_LINK_STATE_L0S 1 -#define PCIE_LINK_STATE_L1 2 -#define PCIE_LINK_STATE_CLKPM 4 - -#ifdef CONFIG_PCIEASPM -int pci_disable_link_state(struct pci_dev *pdev, int state); -int pci_disable_link_state_locked(struct pci_dev *pdev, int state); -void pcie_no_aspm(void); -#else -static inline int pci_disable_link_state(struct pci_dev *pdev, int state) -{ return 0; } -static inline void pcie_no_aspm(void) { } -#endif - -#endif /* LINUX_ASPM_H */ diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h index bca9bc3e5be7..8318a97c9c61 100644 --- a/include/linux/pci-p2pdma.h +++ b/include/linux/pci-p2pdma.h @@ -30,8 +30,10 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length); void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); -int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir); +int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); +void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma); ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, @@ -81,11 +83,17 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev, static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) { } -static inline int pci_p2pdma_map_sg(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir) +static inline int pci_p2pdma_map_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) { return 0; } +static inline 
void pci_p2pdma_unmap_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} static inline int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma) { @@ -111,4 +119,16 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client) return pci_p2pmem_find_many(&client, 1); } +static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0); +} + +static inline void pci_p2pdma_unmap_sg(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir) +{ + pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0); +} + #endif /* _LINUX_PCI_P2P_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 82e4cd1b7ac3..f9088c89a534 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -6,12 +6,18 @@ * Copyright 1994, Drew Eckhardt * Copyright 1997--1999 Martin Mares <mj@ucw.cz> * + * PCI Express ASPM defines and function prototypes + * Copyright (c) 2007 Intel Corp. + * Zhang Yanmin (yanmin.zhang@intel.com) + * Shaohua Li (shaohua.li@intel.com) + * * For more information, please consult the following manuals (look at * http://www.pcisig.com/ for how to get them): * * PCI BIOS Specification * PCI Local Bus Specification * PCI to PCI Bridge Specification + * PCI Express Specification * PCI System Design Guide */ #ifndef LINUX_PCI_H @@ -145,11 +151,6 @@ static inline const char *pci_power_name(pci_power_t state) return pci_power_names[1 + (__force int) state]; } -#define PCI_PM_D2_DELAY 200 -#define PCI_PM_D3_WAIT 10 -#define PCI_PM_D3COLD_WAIT 100 -#define PCI_PM_BUS_WAIT 50 - /** * typedef pci_channel_state_t * @@ -418,7 +419,6 @@ struct pci_dev { unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; - unsigned int has_secondary_link:1; unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ unsigned int is_probed:1; /* Device probing in progress */ unsigned int link_active_reporting:1;/* Device capable of reporting link active */ @@ -649,9 +649,6 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) return dev->bus->self; } -struct device *pci_get_host_bridge_device(struct pci_dev *dev); -void pci_put_host_bridge_device(struct device *dev); - #ifdef CONFIG_PCI_MSI static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { @@ -925,6 +922,11 @@ enum { PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ }; +#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ + /* These external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI @@ -969,7 +971,7 @@ resource_size_t pcibios_align_resource(void *, const struct resource *, resource_size_t, resource_size_t); -/* Weak but can be overriden by arch */ +/* Weak but can be overridden by arch */ void pci_fixup_cardbus(struct pci_bus *); /* Generic PCI functions used internally */ @@ -995,7 +997,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); -void 
pcie_update_link_speed(struct pci_bus *bus, u16 link_status); struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, const char *name, struct hotplug_slot *hotplug); @@ -1241,19 +1242,12 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable); int pci_prepare_to_sleep(struct pci_dev *dev); int pci_back_from_sleep(struct pci_dev *dev); bool pci_dev_run_wake(struct pci_dev *dev); -bool pci_check_pme_status(struct pci_dev *dev); -void pci_pme_wakeup_bus(struct pci_bus *bus); void pci_d3cold_enable(struct pci_dev *dev); void pci_d3cold_disable(struct pci_dev *dev); bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); void pci_wakeup_bus(struct pci_bus *bus); void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); -/* PCI Virtual Channel */ -int pci_save_vc_state(struct pci_dev *dev); -void pci_restore_vc_state(struct pci_dev *dev); -void pci_allocate_vc_save_buffers(struct pci_dev *dev); - /* For use by arch with custom probe code */ void set_pcie_port_type(struct pci_dev *pdev); void set_pcie_hotplug_bridge(struct pci_dev *pdev); @@ -1297,8 +1291,6 @@ int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); void pci_release_selected_regions(struct pci_dev *, int); /* drivers/pci/bus.c */ -struct pci_bus *pci_bus_get(struct pci_bus *bus); -void pci_bus_put(struct pci_bus *bus); void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset); @@ -1408,11 +1400,6 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus, int pci_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); -#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ -#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ -#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ -#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ - /* * Virtual interrupts allow for more interrupts to be allocated * than the device has interrupts for. 
@@ -969,7 +971,7 @@ resource_size_t pcibios_align_resource(void *, const struct resource *,
 				resource_size_t,
 				resource_size_t);
 
-/* Weak but can be overriden by arch */
+/* Weak but can be overridden by arch */
 void pci_fixup_cardbus(struct pci_bus *);
 
 /* Generic PCI functions used internally */
@@ -995,7 +997,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 				int busnr);
-void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
 				 const char *name,
 				 struct hotplug_slot *hotplug);
@@ -1241,19 +1242,12 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable);
 int pci_prepare_to_sleep(struct pci_dev *dev);
 int pci_back_from_sleep(struct pci_dev *dev);
 bool pci_dev_run_wake(struct pci_dev *dev);
-bool pci_check_pme_status(struct pci_dev *dev);
-void pci_pme_wakeup_bus(struct pci_bus *bus);
 void pci_d3cold_enable(struct pci_dev *dev);
 void pci_d3cold_disable(struct pci_dev *dev);
 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
 void pci_wakeup_bus(struct pci_bus *bus);
 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
 
-/* PCI Virtual Channel */
-int pci_save_vc_state(struct pci_dev *dev);
-void pci_restore_vc_state(struct pci_dev *dev);
-void pci_allocate_vc_save_buffers(struct pci_dev *dev);
-
 /* For use by arch with custom probe code */
 void set_pcie_port_type(struct pci_dev *pdev);
 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
@@ -1297,8 +1291,6 @@ int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
 void pci_release_selected_regions(struct pci_dev *, int);
 
 /* drivers/pci/bus.c */
-struct pci_bus *pci_bus_get(struct pci_bus *bus);
-void pci_bus_put(struct pci_bus *bus);
 void pci_add_resource(struct list_head *resources, struct resource *res);
 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
 			     resource_size_t offset);
@@ -1408,11 +1400,6 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
 		      unsigned int command_bits, u32 flags);
 
-#define PCI_IRQ_LEGACY		(1 << 0) /* Allow legacy interrupts */
-#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
-#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
-#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
-
 /*
  * Virtual interrupts allow for more interrupts to be allocated
  * than the device has interrupts for. These are not programmed
@@ -1517,14 +1504,6 @@ static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
 }
 #endif
 
-static inline int
-pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
-		      unsigned int max_vecs, unsigned int flags)
-{
-	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
-					      NULL);
-}
-
 /**
  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
  * @d: the INTx IRQ domain
@@ -1565,10 +1544,22 @@ extern bool pcie_ports_native;
 #define pcie_ports_native	false
 #endif
 
+#define PCIE_LINK_STATE_L0S	1
+#define PCIE_LINK_STATE_L1	2
+#define PCIE_LINK_STATE_CLKPM	4
+
 #ifdef CONFIG_PCIEASPM
+int pci_disable_link_state(struct pci_dev *pdev, int state);
+int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+void pcie_no_aspm(void);
 bool pcie_aspm_support_enabled(void);
 bool pcie_aspm_enabled(struct pci_dev *pdev);
 #else
+static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline void pcie_no_aspm(void) { }
 static inline bool pcie_aspm_support_enabled(void) { return false; }
 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
 #endif
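With the PCIE_LINK_STATE_* flags and pci_disable_link_state() now declared directly in pci.h (earlier kernels kept them in a separate ASPM header), a driver that cannot tolerate ASPM exit latency can ask the core to keep certain link states off. A minimal sketch, assuming an invented helper name and treating failure as advisory:

    #include <linux/pci.h>

    /* Illustrative: keep L0s and L1 disabled on a latency-sensitive function. */
    static int demo_disable_aspm(struct pci_dev *pdev)
    {
    	int ret;

    	ret = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
    					   PCIE_LINK_STATE_L1);
    	if (ret)
    		/* ASPM may be under firmware control; log and carry on. */
    		pci_info(pdev, "cannot disable ASPM (%d)\n", ret);

    	return 0;
    }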
@@ -1579,23 +1570,8 @@ bool pci_aer_available(void);
 static inline bool pci_aer_available(void) { return false; }
 #endif
 
-#ifdef CONFIG_PCIE_ECRC
-void pcie_set_ecrc_checking(struct pci_dev *dev);
-void pcie_ecrc_get_policy(char *str);
-#else
-static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
-static inline void pcie_ecrc_get_policy(char *str) { }
-#endif
-
 bool pci_ats_disabled(void);
 
-#ifdef CONFIG_PCIE_PTM
-int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
-#else
-static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
-{ return -EINVAL; }
-#endif
-
 void pci_cfg_access_lock(struct pci_dev *dev);
 bool pci_cfg_access_trylock(struct pci_dev *dev);
 void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1749,11 +1725,6 @@ static inline void pci_release_regions(struct pci_dev *dev) { }
 
 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
 
-static inline void pci_block_cfg_access(struct pci_dev *dev) { }
-static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
-{ return 0; }
-static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
-
 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
 { return NULL; }
 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
@@ -1782,17 +1753,36 @@ static inline const struct pci_device_id *pci_match_id(const struct pci_device_i
 					 struct pci_dev *dev)
 { return NULL; }
 
 static inline bool pci_ats_disabled(void) { return true; }
+
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+	return -EINVAL;
+}
+
+static inline int
+pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+			       unsigned int max_vecs, unsigned int flags,
+			       struct irq_affinity *aff_desc)
+{
+	return -ENOSPC;
+}
 #endif /* CONFIG_PCI */
 
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+		      unsigned int max_vecs, unsigned int flags)
+{
+	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+					      NULL);
+}
+
 #ifdef CONFIG_PCI_ATS
 /* Address Translation Service */
-void pci_ats_init(struct pci_dev *dev);
 int pci_enable_ats(struct pci_dev *dev, int ps);
 void pci_disable_ats(struct pci_dev *dev);
 int pci_ats_queue_depth(struct pci_dev *dev);
 int pci_ats_page_aligned(struct pci_dev *dev);
 #else
-static inline void pci_ats_init(struct pci_dev *d) { }
 static inline int pci_enable_ats(struct pci_dev *d, int ps)
 { return -ENODEV; }
 static inline void pci_disable_ats(struct pci_dev *d) { }
 static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
@@ -1803,7 +1793,7 @@ static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
 
 #include <asm/pci.h>
 
-/* These two functions provide almost identical functionality. Depennding
+/* These two functions provide almost identical functionality. Depending
  * on the architecture, one will be implemented as a wrapper around the
  * other (in drivers/pci/mmap.c).
 *
@@ -1872,25 +1862,9 @@ static inline const char *pci_name(const struct pci_dev *pdev)
 	return dev_name(&pdev->dev);
 }
 
-
-/*
- * Some archs don't want to expose struct resource to userland as-is
- * in sysfs and /proc
- */
-#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
 void pci_resource_to_user(const struct pci_dev *dev, int bar,
 			  const struct resource *rsrc,
 			  resource_size_t *start, resource_size_t *end);
-#else
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
-		const struct resource *rsrc, resource_size_t *start,
-		resource_size_t *end)
-{
-	*start = rsrc->start;
-	*end = rsrc->end;
-}
-#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
-
 /*
  * The world is not perfect and supplies us with broken PCI devices.
@@ -2032,10 +2006,6 @@ extern unsigned long pci_cardbus_mem_size;
 extern u8 pci_dfl_cache_line_size;
 extern u8 pci_cache_line_size;
 
-extern unsigned long pci_hotplug_io_size;
-extern unsigned long pci_hotplug_mem_size;
-extern unsigned long pci_hotplug_bus_size;
-
 /* Architecture-specific versions may override these (weak) */
 void pcibios_disable_device(struct pci_dev *dev);
 void pcibios_set_master(struct pci_dev *dev);
@@ -2305,10 +2275,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 #ifdef CONFIG_OF
 struct device_node;
 struct irq_domain;
-void pci_set_of_node(struct pci_dev *dev);
-void pci_release_of_node(struct pci_dev *dev);
-void pci_set_bus_of_node(struct pci_bus *bus);
-void pci_release_bus_of_node(struct pci_bus *bus);
 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
 int pci_parse_request_of_pci_ranges(struct device *dev,
 				    struct list_head *resources,
@@ -2318,10 +2284,6 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 
 #else	/* CONFIG_OF */
-static inline void pci_set_of_node(struct pci_dev *dev) { }
-static inline void pci_release_of_node(struct pci_dev *dev) { }
-static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
-static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
 static inline struct irq_domain *
 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
 static inline int pci_parse_request_of_pci_ranges(struct device *dev,
@@ -2435,4 +2397,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
 #define pci_notice_ratelimited(pdev, fmt, arg...) \
 	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
 
+#define pci_info_ratelimited(pdev, fmt, arg...) \
+	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
+
 #endif /* LINUX_PCI_H */
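The pci_info_ratelimited() macro added at the end of pci.h mirrors the existing pci_notice_ratelimited() wrapper and expands to dev_info_ratelimited() on &pdev->dev. A short sketch of the intended use, with an invented event as the trigger:

    #include <linux/pci.h>

    /*
     * Illustrative: log a recoverable, possibly high-frequency event without
     * flooding the kernel log. The "link retrain" condition is made up here.
     */
    static void demo_note_retrain(struct pci_dev *pdev, int attempts)
    {
    	pci_info_ratelimited(pdev, "link retrained after %d attempts\n",
    			     attempts);
    }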
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index f694eb2ca978..b482e42d7153 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -86,114 +86,14 @@ void pci_hp_deregister(struct hotplug_slot *slot);
 #define pci_hp_initialize(slot, bus, nr, name) \
 	__pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME)
 
-/* PCI Setting Record (Type 0) */
-struct hpp_type0 {
-	u32 revision;
-	u8  cache_line_size;
-	u8  latency_timer;
-	u8  enable_serr;
-	u8  enable_perr;
-};
-
-/* PCI-X Setting Record (Type 1) */
-struct hpp_type1 {
-	u32 revision;
-	u8  max_mem_read;
-	u8  avg_max_split;
-	u16 tot_max_split;
-};
-
-/* PCI Express Setting Record (Type 2) */
-struct hpp_type2 {
-	u32 revision;
-	u32 unc_err_mask_and;
-	u32 unc_err_mask_or;
-	u32 unc_err_sever_and;
-	u32 unc_err_sever_or;
-	u32 cor_err_mask_and;
-	u32 cor_err_mask_or;
-	u32 adv_err_cap_and;
-	u32 adv_err_cap_or;
-	u16 pci_exp_devctl_and;
-	u16 pci_exp_devctl_or;
-	u16 pci_exp_lnkctl_and;
-	u16 pci_exp_lnkctl_or;
-	u32 sec_unc_err_sever_and;
-	u32 sec_unc_err_sever_or;
-	u32 sec_unc_err_mask_and;
-	u32 sec_unc_err_mask_or;
-};
-
-/*
- * _HPX PCI Express Setting Record (Type 3)
- */
-struct hpx_type3 {
-	u16 device_type;
-	u16 function_type;
-	u16 config_space_location;
-	u16 pci_exp_cap_id;
-	u16 pci_exp_cap_ver;
-	u16 pci_exp_vendor_id;
-	u16 dvsec_id;
-	u16 dvsec_rev;
-	u16 match_offset;
-	u32 match_mask_and;
-	u32 match_value;
-	u16 reg_offset;
-	u32 reg_mask_and;
-	u32 reg_mask_or;
-};
-
-struct hotplug_program_ops {
-	void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp);
-	void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp);
-	void (*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp);
-	void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp);
-};
-
-enum hpx_type3_dev_type {
-	HPX_TYPE_ENDPOINT	= BIT(0),
-	HPX_TYPE_LEG_END	= BIT(1),
-	HPX_TYPE_RC_END		= BIT(2),
-	HPX_TYPE_RC_EC		= BIT(3),
-	HPX_TYPE_ROOT_PORT	= BIT(4),
-	HPX_TYPE_UPSTREAM	= BIT(5),
-	HPX_TYPE_DOWNSTREAM	= BIT(6),
-	HPX_TYPE_PCI_BRIDGE	= BIT(7),
-	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
-};
-
-enum hpx_type3_fn_type {
-	HPX_FN_NORMAL		= BIT(0),
-	HPX_FN_SRIOV_PHYS	= BIT(1),
-	HPX_FN_SRIOV_VIRT	= BIT(2),
-};
-
-enum hpx_type3_cfg_loc {
-	HPX_CFG_PCICFG		= 0,
-	HPX_CFG_PCIE_CAP	= 1,
-	HPX_CFG_PCIE_CAP_EXT	= 2,
-	HPX_CFG_VEND_CAP	= 3,
-	HPX_CFG_DVSEC		= 4,
-	HPX_CFG_MAX,
-};
-
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
-int pci_acpi_program_hp_params(struct pci_dev *dev,
-			       const struct hotplug_program_ops *hp_ops);
 bool pciehp_is_native(struct pci_dev *bridge);
 int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
 bool shpchp_is_native(struct pci_dev *bridge);
 int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
 int acpi_pci_detect_ejectable(acpi_handle handle);
 #else
-static inline int pci_acpi_program_hp_params(struct pci_dev *dev,
-		const struct hotplug_program_ops *hp_ops)
-{
-	return -ENODEV;
-}
-
 static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
 {
 	return 0;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index de1b75e963ef..21a572469a4e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2134,6 +2134,7 @@
 #define PCI_VENDOR_ID_MYRICOM		0x14c1
 
 #define PCI_VENDOR_ID_MEDIATEK		0x14c3
+#define PCI_DEVICE_ID_MEDIATEK_7629	0x7629
 
 #define PCI_VENDOR_ID_TITAN		0x14D2
 #define PCI_DEVICE_ID_TITAN_010L	0x8001
@@ -2574,6 +2575,8 @@
 
 #define PCI_VENDOR_ID_ASMEDIA		0x1b21
 
+#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS	0x1c36
+
 #define PCI_VENDOR_ID_CIRCUITCO		0x1cc8
 #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD	0x0001
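New constants such as PCI_DEVICE_ID_MEDIATEK_7629 and PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS are consumed by match tables and quirks elsewhere in the tree. A hedged sketch of how such a constant appears in an ID table follows; the driver name and table are illustrative and not taken from this series.

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Illustrative only: match the MT7629 host bridge function by vendor/device. */
    static const struct pci_device_id demo_mtk_ids[] = {
    	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, PCI_DEVICE_ID_MEDIATEK_7629) },
    	{ }
    };
    MODULE_DEVICE_TABLE(pci, demo_mtk_ids);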
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index f28e562d7ca8..29d6e93fd15e 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -591,6 +591,7 @@
 #define  PCI_EXP_SLTCTL_CCIE	0x0010	/* Command Completed Interrupt Enable */
 #define  PCI_EXP_SLTCTL_HPIE	0x0020	/* Hot-Plug Interrupt Enable */
 #define  PCI_EXP_SLTCTL_AIC	0x00c0	/* Attention Indicator Control */
+#define  PCI_EXP_SLTCTL_ATTN_IND_SHIFT	6	/* Attention Indicator shift */
 #define  PCI_EXP_SLTCTL_ATTN_IND_ON	0x0040	/* Attention Indicator on */
 #define  PCI_EXP_SLTCTL_ATTN_IND_BLINK	0x0080	/* Attention Indicator blinking */
 #define  PCI_EXP_SLTCTL_ATTN_IND_OFF	0x00c0	/* Attention Indicator off */
@@ -713,7 +714,9 @@
 #define PCI_EXT_CAP_ID_DPC	0x1D	/* Downstream Port Containment */
 #define PCI_EXT_CAP_ID_L1SS	0x1E	/* L1 PM Substates */
 #define PCI_EXT_CAP_ID_PTM	0x1F	/* Precision Time Measurement */
-#define PCI_EXT_CAP_ID_MAX	PCI_EXT_CAP_ID_PTM
+#define PCI_EXT_CAP_ID_DLF	0x25	/* Data Link Feature */
+#define PCI_EXT_CAP_ID_PL_16GT	0x26	/* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_MAX	PCI_EXT_CAP_ID_PL_16GT
 
 #define PCI_EXT_CAP_DSN_SIZEOF	12
 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -1053,4 +1056,14 @@
 #define  PCI_L1SS_CTL1_LTR_L12_TH_SCALE	0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
 #define PCI_L1SS_CTL2		0x0c	/* Control 2 Register */
 
+/* Data Link Feature */
+#define PCI_DLF_CAP		0x04	/* Capabilities Register */
+#define  PCI_DLF_EXCHANGE_ENABLE	0x80000000  /* Data Link Feature Exchange Enable */
+
+/* Physical Layer 16.0 GT/s */
+#define PCI_PL_16GT_LE_CTRL		0x20	/* Lane Equalization Control Register */
+#define  PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK		0x0000000F
+#define  PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK		0x000000F0
+#define  PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT	4
+
 #endif /* LINUX_PCI_REGS_H */
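The new Physical Layer 16.0 GT/s definitions close out the patch. A sketch of how the lane 0 transmitter presets might be read with them follows; the function name is illustrative and error handling is trimmed. It assumes the usual layout of the Lane Equalization Control register, where each lane occupies one byte with the downstream-port preset in the low nibble and the upstream-port preset in the high nibble.

    #include <linux/pci.h>

    /*
     * Illustrative: fetch the downstream/upstream port transmitter presets for
     * lane 0 from the Physical Layer 16.0 GT/s extended capability, if present.
     */
    static int demo_read_16gt_presets(struct pci_dev *pdev, u8 *dsp, u8 *usp)
    {
    	u32 ctrl;
    	int pos;

    	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PL_16GT);
    	if (!pos)
    		return -ENODEV;

    	/* First Lane Equalization Control dword covers lanes 0-3. */
    	pci_read_config_dword(pdev, pos + PCI_PL_16GT_LE_CTRL, &ctrl);

    	*dsp = ctrl & PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
    	*usp = (ctrl & PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK) >>
    	       PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT;
    	return 0;
    }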