-rw-r--r--  Documentation/ABI/testing/debugfs-turris-mox-rwtm | 9
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-optee-devices | 8
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-soc | 30
-rw-r--r--  Documentation/devicetree/bindings/firmware/qcom,scm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/reset/fsl,imx-src.txt | 49
-rw-r--r--  Documentation/devicetree/bindings/reset/fsl,imx-src.yaml | 82
-rw-r--r--  Documentation/devicetree/bindings/reset/fsl,imx7-src.txt | 56
-rw-r--r--  Documentation/devicetree/bindings/reset/fsl,imx7-src.yaml | 58
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt | 62
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml | 87
-rw-r--r--  Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt | 59
-rw-r--r--  Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml | 102
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 1
-rw-r--r--  arch/arm/mach-omap2/id.c | 20
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c | 70
-rw-r--r--  drivers/clk/clk-scmi.c | 22
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 3
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c | 42
-rw-r--r--  drivers/dma/ti/k3-udma.c | 34
-rw-r--r--  drivers/firmware/arm_scmi/Makefile | 4
-rw-r--r--  drivers/firmware/arm_scmi/base.c | 108
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 20
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 4
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 15
-rw-r--r--  drivers/firmware/arm_scmi/notify.c | 1526
-rw-r--r--  drivers/firmware/arm_scmi/notify.h | 68
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 151
-rw-r--r--  drivers/firmware/arm_scmi/power.c | 92
-rw-r--r--  drivers/firmware/arm_scmi/reset.c | 96
-rw-r--r--  drivers/firmware/arm_scmi/scmi_pm_domain.c | 12
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 69
-rw-r--r--  drivers/firmware/arm_scmi/smc.c | 1
-rw-r--r--  drivers/firmware/imx/Makefile | 2
-rw-r--r--  drivers/firmware/imx/imx-scu-irq.c | 2
-rw-r--r--  drivers/firmware/imx/imx-scu-soc.c (renamed from drivers/soc/imx/soc-imx-scu.c) | 83
-rw-r--r--  drivers/firmware/imx/imx-scu.c | 4
-rw-r--r--  drivers/firmware/imx/rm.c | 45
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 14
-rw-r--r--  drivers/firmware/qcom_scm.c | 8
-rw-r--r--  drivers/firmware/smccc/Kconfig | 9
-rw-r--r--  drivers/firmware/smccc/Makefile | 1
-rw-r--r--  drivers/firmware/smccc/soc_id.c | 114
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 436
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 6
-rw-r--r--  drivers/firmware/ti_sci.c | 2
-rw-r--r--  drivers/firmware/ti_sci.h | 2
-rw-r--r--  drivers/firmware/turris-mox-rwtm.c | 166
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 26
-rw-r--r--  drivers/interconnect/qcom/bcm-voter.c | 6
-rw-r--r--  drivers/irqchip/irq-ti-sci-inta.c | 2
-rw-r--r--  drivers/irqchip/irq-ti-sci-intr.c | 2
-rw-r--r--  drivers/memory/Kconfig | 6
-rw-r--r--  drivers/memory/brcmstb_dpfe.c | 7
-rw-r--r--  drivers/memory/bt1-l2-ctl.c | 2
-rw-r--r--  drivers/memory/da8xx-ddrctl.c | 2
-rw-r--r--  drivers/memory/emif-asm-offsets.c | 10
-rw-r--r--  drivers/memory/emif.c | 23
-rw-r--r--  drivers/memory/fsl_ifc.c | 30
-rw-r--r--  drivers/memory/jz4780-nemc.c | 17
-rw-r--r--  drivers/memory/mtk-smi.c | 2
-rw-r--r--  drivers/memory/mvebu-devbus.c | 20
-rw-r--r--  drivers/memory/of_memory.c | 32
-rw-r--r--  drivers/memory/of_memory.h | 21
-rw-r--r--  drivers/memory/omap-gpmc.c | 66
-rw-r--r--  drivers/memory/pl172.c | 19
-rw-r--r--  drivers/memory/samsung/Kconfig | 7
-rw-r--r--  drivers/memory/samsung/exynos-srom.c | 22
-rw-r--r--  drivers/memory/samsung/exynos5422-dmc.c | 15
-rw-r--r--  drivers/memory/tegra/Kconfig | 14
-rw-r--r--  drivers/memory/tegra/Makefile | 4
-rw-r--r--  drivers/memory/tegra/mc.h | 1
-rw-r--r--  drivers/memory/tegra/tegra124-emc.c | 7
-rw-r--r--  drivers/memory/tegra/tegra186-emc.c | 25
-rw-r--r--  drivers/memory/tegra/tegra186.c | 4
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c | 34
-rw-r--r--  drivers/memory/tegra/tegra210-emc-cc-r21021.c | 1775
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c | 2100
-rw-r--r--  drivers/memory/tegra/tegra210-emc-table.c | 90
-rw-r--r--  drivers/memory/tegra/tegra210-emc.h | 1016
-rw-r--r--  drivers/memory/tegra/tegra210-mc.h | 50
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c | 122
-rw-r--r--  drivers/memory/ti-aemif.c | 16
-rw-r--r--  drivers/memory/ti-emif-pm.c | 2
-rw-r--r--  drivers/reset/reset-intel-gw.c | 24
-rw-r--r--  drivers/reset/reset-simple.c | 23
-rw-r--r--  drivers/reset/reset-socfpga.c | 3
-rw-r--r--  drivers/reset/reset-sunxi.c | 3
-rw-r--r--  drivers/reset/reset-ti-sci.c | 2
-rw-r--r--  drivers/reset/reset-ti-syscon.c | 2
-rw-r--r--  drivers/reset/reset-uniphier-glue.c | 3
-rw-r--r--  drivers/soc/imx/Kconfig | 10
-rw-r--r--  drivers/soc/imx/Makefile | 1
-rw-r--r--  drivers/soc/mediatek/mtk-cmdq-helper.c | 46
-rw-r--r--  drivers/soc/qcom/Kconfig | 2
-rw-r--r--  drivers/soc/qcom/pdr_interface.c | 4
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c | 165
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c | 19
-rw-r--r--  drivers/soc/qcom/rpmh.c | 4
-rw-r--r--  drivers/soc/qcom/smd-rpm.c | 5
-rw-r--r--  drivers/soc/qcom/socinfo.c | 65
-rw-r--r--  drivers/soc/renesas/Kconfig | 11
-rw-r--r--  drivers/soc/renesas/Makefile | 1
-rw-r--r--  drivers/soc/renesas/r8a774e1-sysc.c | 43
-rw-r--r--  drivers/soc/renesas/rcar-rst.c | 1
-rw-r--r--  drivers/soc/renesas/rcar-sysc.c | 3
-rw-r--r--  drivers/soc/renesas/rcar-sysc.h | 1
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 8
-rw-r--r--  drivers/soc/samsung/Kconfig | 3
-rw-r--r--  drivers/soc/samsung/Makefile | 1
-rw-r--r--  drivers/soc/samsung/exynos-regulator-coupler.c | 221
-rw-r--r--  drivers/soc/tegra/fuse/tegra-apbmisc.c | 2
-rw-r--r--  drivers/soc/ti/k3-ringacc.c | 200
-rw-r--r--  drivers/soc/ti/knav_qmss_acc.c | 2
-rw-r--r--  drivers/soc/ux500/ux500-soc-id.c | 22
-rw-r--r--  drivers/soc/versatile/soc-integrator.c | 48
-rw-r--r--  drivers/soc/versatile/soc-realview.c | 48
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 193
-rw-r--r--  drivers/spi/spi-qcom-qspi.c | 117
-rw-r--r--  drivers/tee/optee/core.c | 27
-rw-r--r--  drivers/tee/optee/device.c | 38
-rw-r--r--  drivers/tee/optee/optee_private.h | 10
-rw-r--r--  drivers/tty/serial/qcom_geni_serial.c | 210
-rw-r--r--  include/dt-bindings/reset/ti-syscon.h | 2
-rw-r--r--  include/linux/arm-smccc.h | 5
-rw-r--r--  include/linux/firmware/imx/sci.h | 2
-rw-r--r--  include/linux/firmware/imx/svc/rm.h | 69
-rw-r--r--  include/linux/mailbox/mtk-cmdq-mailbox.h | 2
-rw-r--r--  include/linux/of.h | 5
-rw-r--r--  include/linux/qcom-geni-se.h | 45
-rw-r--r--  include/linux/reset/reset-simple.h (renamed from drivers/reset/reset-simple.h) | 7
-rw-r--r--  include/linux/scmi_protocol.h | 110
-rw-r--r--  include/linux/soc/mediatek/mtk-cmdq.h | 31
-rw-r--r--  include/linux/soc/ti/k3-ringacc.h | 4
-rw-r--r--  include/linux/soc/ti/ti_sci_inta_msi.h | 2
-rw-r--r--  include/linux/soc/ti/ti_sci_protocol.h | 6
-rw-r--r--  include/soc/qcom/rpmh.h | 7
-rw-r--r--  include/soc/tegra/bpmp-abi.h | 913
-rw-r--r--  include/soc/tegra/fuse.h | 2
-rw-r--r--  include/trace/events/scmi.h | 6
143 files changed, 10925 insertions, 1350 deletions
diff --git a/Documentation/ABI/testing/debugfs-turris-mox-rwtm b/Documentation/ABI/testing/debugfs-turris-mox-rwtm
new file mode 100644
index 000000000000..2b3255ee68fd
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-turris-mox-rwtm
@@ -0,0 +1,9 @@
+What: /sys/kernel/debug/turris-mox-rwtm/do_sign
+Date: June 2020
+KernelVersion: 5.8
+Contact: Marek Behún <marek.behun@nic.cz>
+Description: (W) Message to sign with the ECDSA private key stored in
+ device's OTP. The message must be exactly 64 bytes (since
+ this is intended for SHA-512 hashes).
+ (R) The resulting signature, 136 bytes. This contains the R and
+ S values of the ECDSA signature, both in big-endian format.
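The do_sign flow above maps onto a small user-space program: write exactly 64 bytes, then read back 136. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and glossing over how the SHA-512 digest is produced (the digest value and error handling here are purely illustrative):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char digest[64] = { 0 };   /* the SHA-512 hash to sign */
            unsigned char sig[136];             /* R and S values, big-endian */
            int fd;

            fd = open("/sys/kernel/debug/turris-mox-rwtm/do_sign", O_RDWR);
            if (fd < 0)
                    return 1;

            /* (W) the message to sign: must be exactly 64 bytes */
            if (write(fd, digest, sizeof(digest)) != sizeof(digest))
                    return 1;

            /* (R) the resulting 136-byte ECDSA signature */
            if (read(fd, sig, sizeof(sig)) != sizeof(sig))
                    return 1;

            close(fd);
            return 0;
    }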
diff --git a/Documentation/ABI/testing/sysfs-bus-optee-devices b/Documentation/ABI/testing/sysfs-bus-optee-devices
new file mode 100644
index 000000000000..0f58701367b6
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-optee-devices
@@ -0,0 +1,8 @@
+What: /sys/bus/tee/devices/optee-ta-<uuid>/
+Date: May 2020
+KernelVersion: 5.8
+Contact: op-tee@lists.trustedfirmware.org
+Description:
+		The OP-TEE bus provides references to registered drivers under this
+		directory. The <uuid> matches the Trusted Application (TA) driver and the
+		corresponding TA in the secure OS. Drivers are free to create the needed
+		API under the optee-ta-<uuid> directory.
diff --git a/Documentation/ABI/testing/sysfs-devices-soc b/Documentation/ABI/testing/sysfs-devices-soc
index ba3a3fac0ee1..ea999e292f11 100644
--- a/Documentation/ABI/testing/sysfs-devices-soc
+++ b/Documentation/ABI/testing/sysfs-devices-soc
@@ -26,6 +26,30 @@ Description:
Read-only attribute common to all SoCs. Contains SoC family name
(e.g. DB8500).
+ On many ARM based SoCs with SMCCC v1.2+ compliant firmware,
+ this will contain the JEDEC JEP106 manufacturer’s identification
+ code. The format is "jep106:XXYY" where XX is the identity code
+ and YY is the continuation code.
+
+ This manufacturer’s identification code is defined by one
+ or more eight (8) bit fields, each consisting of seven (7)
+ data bits plus one (1) odd parity bit. It is a single field,
+ limiting the possible number of vendors to 126. To expand
+ the maximum number of identification codes, a continuation
+ scheme has been defined.
+
+ The specified mechanism is that an identity code of 0x7F
+ represents the "continuation code" and implies the presence
+ of an additional identity code field, and this mechanism
+ may be extended to multiple continuation codes followed
+ by the manufacturer's identity code.
+
+ For example, ARM has identity code 0x7F 0x7F 0x7F 0x7F 0x3B,
+ which is code 0x3B on the fifth 'page'. This is shortened
+ as JEP106 identity code of 0x3B and a continuation code of
+ 0x4 to represent the four continuation codes preceding the
+ identity code.
+
What: /sys/devices/socX/serial_number
Date: January 2019
contact: Bjorn Andersson <bjorn.andersson@linaro.org>
@@ -40,6 +64,12 @@ Description:
Read-only attribute supported by most SoCs. In the case of
ST-Ericsson's chips this contains the SoC serial number.
+ On many ARM based SoCs with SMCCC v1.2+ compliant firmware,
+ this will contain the SOC ID appended to the family attribute
+ to ensure there is no conflict in this namespace across various
+ vendors. The format is "jep106:XXYY:ZZZZ" where XX is the identity
+ code, YY is the continuation code and ZZZZ is the SOC ID.
+
What: /sys/devices/socX/revision
Date: January 2012
contact: Lee Jones <lee.jones@linaro.org>
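Since the continuation-code arithmetic above is easy to misread, here is a hedged sketch of how the family and soc_id strings can be composed, following the "jep106:XXYY" and "jep106:XXYY:ZZZZ" formats exactly as stated in this file (function and variable names are illustrative, not the kernel's):

    #include <stdio.h>

    int main(void)
    {
            /* ARM example from the text: identity code 0x3B, continuation 0x4 */
            unsigned char id_code = 0x3b;   /* XX: JEP106 identity code */
            unsigned char cont_code = 0x4;  /* YY: count of 0x7F continuation codes */
            unsigned short soc_id = 0x1234; /* ZZZZ: vendor SOC ID (made-up value) */
            char family[16], soc[24];

            snprintf(family, sizeof(family), "jep106:%02x%02x", id_code, cont_code);
            snprintf(soc, sizeof(soc), "%s:%04x", family, soc_id);

            printf("family: %s\nsoc_id: %s\n", family, soc);
            return 0;
    }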
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 354b448fc0c3..78456437df5f 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -11,10 +11,12 @@ Required properties:
* "qcom,scm-apq8084"
* "qcom,scm-ipq4019"
* "qcom,scm-ipq806x"
+ * "qcom,scm-ipq8074"
* "qcom,scm-msm8660"
* "qcom,scm-msm8916"
* "qcom,scm-msm8960"
* "qcom,scm-msm8974"
+ * "qcom,scm-msm8994"
* "qcom,scm-msm8996"
* "qcom,scm-msm8998"
* "qcom,scm-sc7180"
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
index 1a8718f8855d..178fca08278f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
@@ -55,7 +55,7 @@ Required Properties:
corresponds to a range of host irqs.
For more details on TISCI IRQ resource management refer:
-http://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html
+https://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html
Example:
--------
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx-src.txt
deleted file mode 100644
index 6ed79e60248a..000000000000
--- a/Documentation/devicetree/bindings/reset/fsl,imx-src.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-Freescale i.MX System Reset Controller
-======================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible: Should be "fsl,<chip>-src"
-- reg: should be register base and length as documented in the
- datasheet
-- interrupts: Should contain SRC interrupt and CPU WDOG interrupt,
- in this order.
-- #reset-cells: 1, see below
-
-example:
-
-src: src@20d8000 {
- compatible = "fsl,imx6q-src";
- reg = <0x020d8000 0x4000>;
- interrupts = <0 91 0x04 0 96 0x04>;
- #reset-cells = <1>;
-};
-
-Specifying reset lines connected to IP modules
-==============================================
-
-The system reset controller can be used to reset the GPU, VPU,
-IPU, and OpenVG IP modules on i.MX5 and i.MX6 ICs. Those device
-nodes should specify the reset line on the SRC in their resets
-property, containing a phandle to the SRC device node and a
-RESET_INDEX specifying which module to reset, as described in
-reset.txt
-
-example:
-
- ipu1: ipu@2400000 {
- resets = <&src 2>;
- };
- ipu2: ipu@2800000 {
- resets = <&src 4>;
- };
-
-The following RESET_INDEX values are valid for i.MX5:
-GPU_RESET 0
-VPU_RESET 1
-IPU1_RESET 2
-OPEN_VG_RESET 3
-The following additional RESET_INDEX value is valid for i.MX6:
-IPU2_RESET 4
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx-src.yaml b/Documentation/devicetree/bindings/reset/fsl,imx-src.yaml
new file mode 100644
index 000000000000..27c5e34a3ac6
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/fsl,imx-src.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/fsl,imx-src.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX System Reset Controller
+
+maintainers:
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ The system reset controller can be used to reset the GPU, VPU,
+ IPU, and OpenVG IP modules on i.MX5 and i.MX6 ICs. Those device
+ nodes should specify the reset line on the SRC in their resets
+ property, containing a phandle to the SRC device node and a
+ RESET_INDEX specifying which module to reset, as described in
+ reset.txt
+
+ The following RESET_INDEX values are valid for i.MX5:
+ GPU_RESET 0
+ VPU_RESET 1
+ IPU1_RESET 2
+ OPEN_VG_RESET 3
+ The following additional RESET_INDEX value is valid for i.MX6:
+ IPU2_RESET 4
+
+properties:
+ compatible:
+ oneOf:
+ - const: "fsl,imx51-src"
+ - items:
+ - const: "fsl,imx50-src"
+ - const: "fsl,imx51-src"
+ - items:
+ - const: "fsl,imx53-src"
+ - const: "fsl,imx51-src"
+ - items:
+ - const: "fsl,imx6q-src"
+ - const: "fsl,imx51-src"
+ - items:
+ - const: "fsl,imx6sx-src"
+ - const: "fsl,imx51-src"
+ - items:
+ - const: "fsl,imx6sl-src"
+ - const: "fsl,imx51-src"
+ - items:
+ - const: "fsl,imx6ul-src"
+ - const: "fsl,imx51-src"
+ - items:
+ - const: "fsl,imx6sll-src"
+ - const: "fsl,imx51-src"
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: SRC interrupt
+ - description: CPU WDOG interrupts out of SRC
+ minItems: 1
+ maxItems: 2
+
+ '#reset-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ reset-controller@73fd0000 {
+ compatible = "fsl,imx51-src";
+ reg = <0x73fd0000 0x4000>;
+ interrupts = <75>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
deleted file mode 100644
index e10502d9153e..000000000000
--- a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Freescale i.MX7 System Reset Controller
-======================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible:
- - For i.MX7 SoCs should be "fsl,imx7d-src", "syscon"
- - For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon"
- - For i.MX8MM SoCs should be "fsl,imx8mm-src", "fsl,imx8mq-src", "syscon"
- - For i.MX8MN SoCs should be "fsl,imx8mn-src", "fsl,imx8mq-src", "syscon"
- - For i.MX8MP SoCs should be "fsl,imx8mp-src", "syscon"
-- reg: should be register base and length as documented in the
- datasheet
-- interrupts: Should contain SRC interrupt
-- #reset-cells: 1, see below
-
-example:
-
-src: reset-controller@30390000 {
- compatible = "fsl,imx7d-src", "syscon";
- reg = <0x30390000 0x2000>;
- interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
- #reset-cells = <1>;
-};
-
-
-Specifying reset lines connected to IP modules
-==============================================
-
-The system reset controller can be used to reset various set of
-peripherals. Device nodes that need access to reset lines should
-specify them as a reset phandle in their corresponding node as
-specified in reset.txt.
-
-Example:
-
- pcie: pcie@33800000 {
-
- ...
-
- resets = <&src IMX7_RESET_PCIEPHY>,
- <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
- reset-names = "pciephy", "apps";
-
- ...
- };
-
-
-For list of all valid reset indices see
-<dt-bindings/reset/imx7-reset.h> for i.MX7,
-<dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ and
-<dt-bindings/reset/imx8mq-reset.h> for i.MX8MM and
-<dt-bindings/reset/imx8mq-reset.h> for i.MX8MN and
-<dt-bindings/reset/imx8mp-reset.h> for i.MX8MP
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.yaml b/Documentation/devicetree/bindings/reset/fsl,imx7-src.yaml
new file mode 100644
index 000000000000..b1a71c1bb05b
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/fsl,imx7-src.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX7 System Reset Controller
+
+maintainers:
+ - Andrey Smirnov <andrew.smirnov@gmail.com>
+
+description: |
+ The system reset controller can be used to reset various set of
+ peripherals. Device nodes that need access to reset lines should
+ specify them as a reset phandle in their corresponding node as
+ specified in reset.txt.
+
+ For list of all valid reset indices see
+ <dt-bindings/reset/imx7-reset.h> for i.MX7,
+ <dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ, i.MX8MM and i.MX8MN,
+ <dt-bindings/reset/imx8mp-reset.h> for i.MX8MP.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,imx7d-src
+ - fsl,imx8mq-src
+ - fsl,imx8mp-src
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#reset-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ reset-controller@30390000 {
+ compatible = "fsl,imx7d-src", "syscon";
+ reg = <0x30390000 0x2000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt
deleted file mode 100644
index 616fddcd09fd..000000000000
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-Qualcomm Resource Power Manager (RPM) over SMD
-
-This driver is used to interface with the Resource Power Manager (RPM) found in
-various Qualcomm platforms. The RPM allows each component in the system to vote
-for state of the system resources, such as clocks, regulators and bus
-frequencies.
-
-The SMD information for the RPM edge should be filled out. See qcom,smd.txt for
-the required edge properties. All SMD related properties will reside within the
-RPM node itself.
-
-= SUBDEVICES
-
-The RPM exposes resources to its subnodes. The rpm_requests node must be
-present and this subnode may contain children that designate regulator
-resources.
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,rpm-apq8084"
- "qcom,rpm-msm8916"
- "qcom,rpm-msm8974"
- "qcom,rpm-msm8976"
- "qcom,rpm-msm8998"
- "qcom,rpm-sdm660"
- "qcom,rpm-qcs404"
-
-- qcom,smd-channels:
- Usage: required
- Value type: <string>
- Definition: must be "rpm_requests"
-
-Refer to Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
-for information on the regulator subnodes that can exist under the rpm_requests.
-
-Example:
-
- soc {
- apcs: syscon@f9011000 {
- compatible = "syscon";
- reg = <0xf9011000 0x1000>;
- };
- };
-
- smd {
- compatible = "qcom,smd";
-
- rpm {
- interrupts = <0 168 1>;
- qcom,ipc = <&apcs 8 0>;
- qcom,smd-edge = <15>;
-
- rpm_requests {
- compatible = "qcom,rpm-msm8974";
- qcom,smd-channels = "rpm_requests";
-
- ...
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
new file mode 100644
index 000000000000..468d658ce3e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/qcom/qcom,smd-rpm.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Resource Power Manager (RPM) over SMD
+
+description: |
+ This driver is used to interface with the Resource Power Manager (RPM) found
+ in various Qualcomm platforms. The RPM allows each component in the system
+ to vote for state of the system resources, such as clocks, regulators and bus
+ frequencies.
+
+ The SMD information for the RPM edge should be filled out. See qcom,smd.txt
+ for the required edge properties. All SMD related properties will reside
+ within the RPM node itself.
+
+ The RPM exposes resources to its subnodes. The rpm_requests node must be
+ present and this subnode may contain children that designate regulator
+ resources.
+
+ Refer to Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
+ for information on the regulator subnodes that can exist under the
+ rpm_requests.
+
+maintainers:
+ - Kathiravan T <kathirav@codeaurora.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,rpm-apq8084
+ - qcom,rpm-ipq6018
+ - qcom,rpm-msm8916
+ - qcom,rpm-msm8974
+ - qcom,rpm-msm8976
+ - qcom,rpm-msm8996
+ - qcom,rpm-msm8998
+ - qcom,rpm-sdm660
+ - qcom,rpm-qcs404
+
+ qcom,smd-channels:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description: Channel name used for the RPM communication
+ items:
+ - const: rpm_requests
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,rpm-apq8084
+ - qcom,rpm-msm8916
+ - qcom,rpm-msm8974
+then:
+ required:
+ - qcom,smd-channels
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ smd {
+ compatible = "qcom,smd";
+
+ rpm {
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8974";
+ qcom,smd-channels = "rpm_requests";
+
+ /* Regulator nodes to follow */
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt
deleted file mode 100644
index 59758ccce809..000000000000
--- a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-* Texas Instruments K3 NavigatorSS Ring Accelerator
-
-The Ring Accelerator (RA) is a machine which converts read/write accesses
-from/to a constant address into corresponding read/write accesses from/to a
-circular data structure in memory. The RA eliminates the need for each DMA
-controller which needs to access ring elements from having to know the current
-state of the ring (base address, current offset). The DMA controller
-performs a read or write access to a specific address range (which maps to the
-source interface on the RA) and the RA replaces the address for the transaction
-with a new address which corresponds to the head or tail element of the ring
-(head for reads, tail for writes).
-
-The Ring Accelerator is a hardware module that is responsible for accelerating
-management of the packet queues. The K3 SoCs can have more than one RA instances
-
-Required properties:
-- compatible : Must be "ti,am654-navss-ringacc";
-- reg : Should contain register location and length of the following
- named register regions.
-- reg-names : should be
- "rt" - The RA Ring Real-time Control/Status Registers
- "fifos" - The RA Queues Registers
- "proxy_gcfg" - The RA Proxy Global Config Registers
- "proxy_target" - The RA Proxy Datapath Registers
-- ti,num-rings : Number of rings supported by RA
-- ti,sci-rm-range-gp-rings : TI-SCI RM subtype for GP ring range
-- ti,sci : phandle on TI-SCI compatible System controller node
-- ti,sci-dev-id : TI-SCI device id of the ring accelerator
-- msi-parent : phandle for "ti,sci-inta" interrupt controller
-
-Optional properties:
- -- ti,dma-ring-reset-quirk : enable ringacc / udma ring state interoperability
- issue software w/a
-
-Example:
-
-ringacc: ringacc@3c000000 {
- compatible = "ti,am654-navss-ringacc";
- reg = <0x0 0x3c000000 0x0 0x400000>,
- <0x0 0x38000000 0x0 0x400000>,
- <0x0 0x31120000 0x0 0x100>,
- <0x0 0x33000000 0x0 0x40000>;
- reg-names = "rt", "fifos",
- "proxy_gcfg", "proxy_target";
- ti,num-rings = <818>;
- ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
- ti,dma-ring-reset-quirk;
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <187>;
- msi-parent = <&inta_main_udmass>;
-};
-
-client:
-
-dma_ipx: dma_ipx@<addr> {
- ...
- ti,ringacc = <&ringacc>;
- ...
-}
diff --git a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
new file mode 100644
index 000000000000..ae33fc957141
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/ti/k3-ringacc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments K3 NavigatorSS Ring Accelerator
+
+maintainers:
+ - Santosh Shilimkar <ssantosh@kernel.org>
+ - Grygorii Strashko <grygorii.strashko@ti.com>
+
+description: |
+ The Ring Accelerator (RA) is a machine which converts read/write accesses
+ from/to a constant address into corresponding read/write accesses from/to a
+ circular data structure in memory. The RA eliminates the need for each DMA
+ controller that accesses ring elements to track the current state of the
+ ring (base address, current offset). The DMA controller
+ performs a read or write access to a specific address range (which maps to the
+ source interface on the RA) and the RA replaces the address for the transaction
+ with a new address which corresponds to the head or tail element of the ring
+ (head for reads, tail for writes).
+
+ The Ring Accelerator is a hardware module that is responsible for accelerating
+ management of the packet queues. The K3 SoCs can have more than one RA
+ instance.
+
+properties:
+ compatible:
+ items:
+ - const: ti,am654-navss-ringacc
+
+ reg:
+ items:
+ - description: real time registers regions
+ - description: fifos registers regions
+ - description: proxy gcfg registers regions
+ - description: proxy target registers regions
+
+ reg-names:
+ items:
+ - const: rt
+ - const: fifos
+ - const: proxy_gcfg
+ - const: proxy_target
+
+ msi-parent: true
+
+ ti,num-rings:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Number of rings supported by RA
+
+ ti,sci-rm-range-gp-rings:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: TI-SCI RM subtype for GP ring range
+
+ ti,sci:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: phandle on TI-SCI compatible System controller node
+
+ ti,sci-dev-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: TI-SCI device id of the ring accelerator
+
+ ti,dma-ring-reset-quirk:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ enable the software workaround for the ringacc/udma ring state
+ interoperability issue
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - msi-parent
+ - ti,num-rings
+ - ti,sci-rm-range-gp-rings
+ - ti,sci
+ - ti,sci-dev-id
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ringacc: ringacc@3c000000 {
+ compatible = "ti,am654-navss-ringacc";
+ reg = <0x0 0x3c000000 0x0 0x400000>,
+ <0x0 0x38000000 0x0 0x400000>,
+ <0x0 0x31120000 0x0 0x100>,
+ <0x0 0x33000000 0x0 0x40000>;
+ reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
+ ti,num-rings = <818>;
+ ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,dma-ring-reset-quirk;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <187>;
+ msi-parent = <&inta_main_udmass>;
+ };
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index 9193fd3cfb55..4ef80ba25c66 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11117,6 +11117,14 @@ F: Documentation/core-api/boot-time-mm.rst
F: include/linux/memblock.h
F: mm/memblock.c
+MEMORY CONTROLLER DRIVERS
+M: Krzysztof Kozlowski <krzk@kernel.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
+F: Documentation/devicetree/bindings/memory-controllers/
+F: drivers/memory/
+
MEMORY MANAGEMENT
M: Andrew Morton <akpm@linux-foundation.org>
L: linux-mm@kvack.org
@@ -12729,6 +12737,7 @@ OP-TEE DRIVER
M: Jens Wiklander <jens.wiklander@linaro.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-optee-devices
F: drivers/tee/optee/
OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 76838255b5fa..f185cd3d4c62 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -118,6 +118,7 @@ config SOC_EXYNOS5800
bool "Samsung EXYNOS5800"
default y
depends on SOC_EXYNOS5420
+ select EXYNOS_REGULATOR_COUPLER
config EXYNOS_MCPM
bool
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 188ea5258c99..1d119b974f5f 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -775,19 +775,23 @@ static const char * __init omap_get_family(void)
return kasprintf(GFP_KERNEL, "Unknown");
}
-static ssize_t omap_get_type(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", omap_types[omap_type()]);
}
-static struct device_attribute omap_soc_attr =
- __ATTR(type, S_IRUGO, omap_get_type, NULL);
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *omap_soc_attrs[] = {
+ &dev_attr_type.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(omap_soc);
void __init omap_soc_device_init(void)
{
- struct device *parent;
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
@@ -798,14 +802,12 @@ void __init omap_soc_device_init(void)
soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family();
soc_dev_attr->revision = soc_rev;
+ soc_dev_attr->custom_attr_group = omap_soc_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return;
}
-
- parent = soc_device_to_device(soc_dev);
- device_create_file(parent, &omap_soc_attr);
}
#endif /* CONFIG_SOC_BUS */
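The hunk above leans on two sysfs helpers: DEVICE_ATTR_RO(type) generates dev_attr_type from type_show(), and ATTRIBUTE_GROUPS(omap_soc) turns omap_soc_attrs into the omap_soc_groups array whose first entry is handed to custom_attr_group. Roughly, the latter expands to something like this (a sketch of the generic helper from linux/sysfs.h, not code from this patch):

    static const struct attribute_group omap_soc_group = {
            .attrs = omap_soc_attrs,
    };

    static const struct attribute_group *omap_soc_groups[] = {
            &omap_soc_group,
            NULL,
    };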
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9fde42c2da0e..e0f33826819f 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -881,7 +881,6 @@ CONFIG_OWL_PM_DOMAINS=y
CONFIG_RASPBERRYPI_POWER=y
CONFIG_FSL_DPAA=y
CONFIG_FSL_MC_DPIO=y
-CONFIG_IMX_SCU_SOC=y
CONFIG_QCOM_AOSS_QMP=y
CONFIG_QCOM_GENI_SE=y
CONFIG_QCOM_RMTFS_MEM=m
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 2491a2cb54a2..2ccdf8ac6994 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -214,11 +214,10 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
* Return:
* On success, 0. On failure, -errno.
*/
-static int ftpm_tee_probe(struct platform_device *pdev)
+static int ftpm_tee_probe(struct device *dev)
{
int rc;
struct tpm_chip *chip;
- struct device *dev = &pdev->dev;
struct ftpm_tee_private *pvt_data = NULL;
struct tee_ioctl_open_session_arg sess_arg;
@@ -297,6 +296,13 @@ out_tee_session:
return rc;
}
+static int ftpm_plat_tee_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ftpm_tee_probe(dev);
+}
+
/**
* ftpm_tee_remove() - remove the TPM device
* @pdev: the platform_device description.
@@ -304,9 +310,9 @@ out_tee_session:
* Return:
* 0 always.
*/
-static int ftpm_tee_remove(struct platform_device *pdev)
+static int ftpm_tee_remove(struct device *dev)
{
- struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(dev);
/* Release the chip */
tpm_chip_unregister(pvt_data->chip);
@@ -328,11 +334,18 @@ static int ftpm_tee_remove(struct platform_device *pdev)
return 0;
}
+static int ftpm_plat_tee_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ftpm_tee_remove(dev);
+}
+
/**
* ftpm_tee_shutdown() - shutdown the TPM device
* @pdev: the platform_device description.
*/
-static void ftpm_tee_shutdown(struct platform_device *pdev)
+static void ftpm_plat_tee_shutdown(struct platform_device *pdev)
{
struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
@@ -347,17 +360,54 @@ static const struct of_device_id of_ftpm_tee_ids[] = {
};
MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
-static struct platform_driver ftpm_tee_driver = {
+static struct platform_driver ftpm_tee_plat_driver = {
.driver = {
.name = "ftpm-tee",
.of_match_table = of_match_ptr(of_ftpm_tee_ids),
},
- .probe = ftpm_tee_probe,
- .remove = ftpm_tee_remove,
- .shutdown = ftpm_tee_shutdown,
+ .shutdown = ftpm_plat_tee_shutdown,
+ .probe = ftpm_plat_tee_probe,
+ .remove = ftpm_plat_tee_remove,
+};
+
+/* UUID of the fTPM TA */
+static const struct tee_client_device_id optee_ftpm_id_table[] = {
+ {UUID_INIT(0xbc50d971, 0xd4c9, 0x42c4,
+ 0x82, 0xcb, 0x34, 0x3f, 0xb7, 0xf3, 0x78, 0x96)},
+ {}
};
-module_platform_driver(ftpm_tee_driver);
+MODULE_DEVICE_TABLE(tee, optee_ftpm_id_table);
+
+static struct tee_client_driver ftpm_tee_driver = {
+ .id_table = optee_ftpm_id_table,
+ .driver = {
+ .name = "optee-ftpm",
+ .bus = &tee_bus_type,
+ .probe = ftpm_tee_probe,
+ .remove = ftpm_tee_remove,
+ },
+};
+
+static int __init ftpm_mod_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&ftpm_tee_plat_driver);
+ if (rc)
+ return rc;
+
+ return driver_register(&ftpm_tee_driver.driver);
+}
+
+static void __exit ftpm_mod_exit(void)
+{
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ driver_unregister(&ftpm_tee_driver.driver);
+}
+
+module_init(ftpm_mod_init);
+module_exit(ftpm_mod_exit);
MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index c491f5de0f3f..c754dfbb73fd 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = {
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
{
int ret;
+ unsigned long min_rate, max_rate;
+
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
@@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
sclk->hw.init = &init;
ret = devm_clk_hw_register(dev, &sclk->hw);
- if (!ret)
- clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
- sclk->info->range.max_rate);
+ if (ret)
+ return ret;
+
+ if (sclk->info->rate_discrete) {
+ int num_rates = sclk->info->list.num_rates;
+
+ if (num_rates <= 0)
+ return -EINVAL;
+
+ min_rate = sclk->info->list.rates[0];
+ max_rate = sclk->info->list.rates[num_rates - 1];
+ } else {
+ min_rate = sclk->info->range.min_rate;
+ max_rate = sclk->info->range.max_rate;
+ }
+
+ clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
return ret;
}
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 61623e2ff149..1cf688fcb56b 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -198,7 +198,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = latency;
- policy->fast_switch_possible = true;
+ policy->fast_switch_possible =
+ handle->perf_ops->fast_switch_possible(handle, cpu_dev);
em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 64c8955e0cf1..c888ae4fec96 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -271,20 +271,12 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
/* request and cfg rings */
- tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
- tx_chn->udma_tchan_id, 0);
- if (!tx_chn->ringtx) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get TX ring %u\n",
- tx_chn->udma_tchan_id);
- goto err;
- }
-
- tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
- -1, 0);
- if (!tx_chn->ringtxcq) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get TXCQ ring\n");
+ ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
+ tx_chn->udma_tchan_id, -1,
+ &tx_chn->ringtx,
+ &tx_chn->ringtxcq);
+ if (ret) {
+ dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
goto err;
}
@@ -587,22 +579,16 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
}
/* request and cfg rings */
- flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
- flow_cfg->ring_rxq_id, 0);
- if (!flow->ringrx) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get RX ring\n");
+ ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
+ flow_cfg->ring_rxq_id,
+ flow_cfg->ring_rxfdq0_id,
+ &flow->ringrxfdq,
+ &flow->ringrx);
+ if (ret) {
+ dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
goto err_rflow_put;
}
- flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
- flow_cfg->ring_rxfdq0_id, 0);
- if (!flow->ringrxfdq) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get RXFDQ ring\n");
- goto err_ringrx_free;
- }
-
ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringrx %d\n", ret);
@@ -673,8 +659,6 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
err_ringrxfdq_free:
k3_ringacc_ring_free(flow->ringrxfdq);
-
-err_ringrx_free:
k3_ringacc_ring_free(flow->ringrx);
err_rflow_put:
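Both hunks above follow the same pattern: the new k3_ringacc_request_rings_pair() helper replaces two separate k3_ringacc_request_ring() calls and one error label. A hedged sketch of the calling convention, inferred from these call sites (the wrapper name is hypothetical):

    #include <linux/soc/ti/k3-ringacc.h>

    static int request_ring_pair(struct k3_ringacc *ringacc, int fwd_ring_id,
                                 struct k3_ring **fwd, struct k3_ring **compl_ring)
    {
            int ret;

            /* fwd_ring_id is fixed; -1 requests any free completion ring */
            ret = k3_ringacc_request_rings_pair(ringacc, fwd_ring_id, -1,
                                                fwd, compl_ring);
            if (ret)
                    return ret;     /* neither ring is held on failure */

            return 0;
    }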
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6c879a734360..49d0d3af6311 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1418,17 +1418,12 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
if (ret)
return ret;
- uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
- uc->tchan->id, 0);
- if (!uc->tchan->t_ring) {
- ret = -EBUSY;
- goto err_tx_ring;
- }
-
- uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
- if (!uc->tchan->tc_ring) {
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
+ &uc->tchan->t_ring,
+ &uc->tchan->tc_ring);
+ if (ret) {
ret = -EBUSY;
- goto err_txc_ring;
+ goto err_ring;
}
memset(&ring_cfg, 0, sizeof(ring_cfg));
@@ -1447,10 +1442,9 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
err_ringcfg:
k3_ringacc_ring_free(uc->tchan->tc_ring);
uc->tchan->tc_ring = NULL;
-err_txc_ring:
k3_ringacc_ring_free(uc->tchan->t_ring);
uc->tchan->t_ring = NULL;
-err_tx_ring:
+err_ring:
udma_put_tchan(uc);
return ret;
@@ -1499,16 +1493,11 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
rflow = uc->rflow;
fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
- rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
- if (!rflow->fd_ring) {
- ret = -EBUSY;
- goto err_rx_ring;
- }
-
- rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
- if (!rflow->r_ring) {
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
+ &rflow->fd_ring, &rflow->r_ring);
+ if (ret) {
ret = -EBUSY;
- goto err_rxc_ring;
+ goto err_ring;
}
memset(&ring_cfg, 0, sizeof(ring_cfg));
@@ -1533,10 +1522,9 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
err_ringcfg:
k3_ringacc_ring_free(rflow->r_ring);
rflow->r_ring = NULL;
-err_rxc_ring:
k3_ringacc_ring_free(rflow->fd_ring);
rflow->fd_ring = NULL;
-err_rx_ring:
+err_ring:
udma_put_rflow(uc);
err_rflow:
udma_put_rchan(uc);
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 1cad32b38b29..6f9cbc4aef22 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
-scmi-driver-y = driver.o
+scmi-driver-y = driver.o notify.o
scmi-transport-y = shmem.o
scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
-scmi-transport-$(CONFIG_ARM_PSCI_FW) += smc.o
+scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index ce7d9203e41b..9853bd3c4d45 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -5,7 +5,15 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
+
+#define SCMI_BASE_NUM_SOURCES 1
+#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
enum scmi_base_protocol_cmd {
BASE_DISCOVER_VENDOR = 0x3,
@@ -19,16 +27,25 @@ enum scmi_base_protocol_cmd {
BASE_RESET_AGENT_CONFIGURATION = 0xb,
};
-enum scmi_base_protocol_notify {
- BASE_ERROR_EVENT = 0x0,
-};
-
struct scmi_msg_resp_base_attributes {
u8 num_protocols;
u8 num_agents;
__le16 reserved;
};
+struct scmi_msg_base_error_notify {
+ __le32 event_control;
+#define BASE_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_base_error_notify_payld {
+ __le32 agent_id;
+ __le32 error_status;
+#define IS_FATAL_ERROR(x) ((x) & BIT(31))
+#define ERROR_CMD_COUNT(x) FIELD_GET(GENMASK(9, 0), (x))
+ __le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT];
+};
+
/**
* scmi_base_attributes_get() - gets the implementation details
* that are associated with the base protocol.
@@ -222,6 +239,83 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
return ret;
}
+static int scmi_base_error_notify(const struct scmi_handle *handle, bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_base_error_notify *cfg;
+
+ ret = scmi_xfer_get_init(handle, BASE_NOTIFY_ERRORS,
+ SCMI_PROTOCOL_BASE, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_base_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_base_error_notify(handle, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
+
+ return ret;
+}
+
+static void *scmi_base_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ int i;
+ const struct scmi_base_error_notify_payld *p = payld;
+ struct scmi_base_error_report *r = report;
+
+ /*
+ * The BaseError notification payload is variable in size, up to
+ * a maximum length determined by the struct pointed to by p.
+ * payld_sz is the effective length of this notification payload,
+ * so it cannot be greater than the maximum size allowed by the
+ * struct pointed to by p.
+ */
+ if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status));
+ r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status));
+ for (i = 0; i < r->cmd_count; i++)
+ r->reports[i] = le64_to_cpu(p->msg_reports[i]);
+ *src_id = 0;
+
+ return r;
+}
+
+static const struct scmi_event base_events[] = {
+ {
+ .id = SCMI_EVENT_BASE_ERROR_EVENT,
+ .max_payld_sz = sizeof(struct scmi_base_error_notify_payld),
+ .max_report_sz = sizeof(struct scmi_base_error_report) +
+ SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64),
+ },
+};
+
+static const struct scmi_event_ops base_event_ops = {
+ .set_notify_enabled = scmi_base_set_notify_enabled,
+ .fill_custom_report = scmi_base_fill_custom_report,
+};
+
int scmi_base_protocol_init(struct scmi_handle *h)
{
int id, ret;
@@ -256,6 +350,12 @@ int scmi_base_protocol_init(struct scmi_handle *h)
dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
rev->num_agents);
+ scmi_register_protocol_events(handle, SCMI_PROTOCOL_BASE,
+ (4 * SCMI_PROTO_QUEUE_SZ),
+ &base_event_ops, base_events,
+ ARRAY_SIZE(base_events),
+ SCMI_BASE_NUM_SOURCES);
+
for (id = 0; id < rev->num_agents; id++) {
scmi_base_discover_agent_get(handle, id, name);
dev_dbg(dev, "Agent %d: %s\n", id, name);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 4c2227662b26..75e39882746e 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -5,6 +5,8 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#include <linux/sort.h>
+
#include "common.h"
enum scmi_clock_protocol_cmd {
@@ -121,11 +123,23 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
return ret;
}
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+ const u64 *r1 = _r1, *r2 = _r2;
+
+ if (*r1 < *r2)
+ return -1;
+ else if (*r1 == *r2)
+ return 0;
+ else
+ return 1;
+}
+
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
struct scmi_clock_info *clk)
{
- u64 *rate;
+ u64 *rate = NULL;
int ret, cnt;
bool rate_discrete = false;
u32 tot_rate_cnt = 0, rates_flag;
@@ -184,8 +198,10 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
*/
} while (num_returned && num_remaining);
- if (rate_discrete)
+ if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
+ sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+ }
clk->rate_discrete = rate_discrete;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 31fe5a22a011..c113e578cc6c 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -6,6 +6,8 @@
*
* Copyright (C) 2018 ARM Ltd.
*/
+#ifndef _SCMI_COMMON_H
+#define _SCMI_COMMON_H
#include <linux/bitfield.h>
#include <linux/completion.h>
@@ -235,3 +237,5 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+
+#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 7483cacf63f9..03ec74242c14 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "common.h"
+#include "notify.h"
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
@@ -208,7 +209,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->rx_minfo;
+ ktime_t ts;
+ ts = ktime_get_boottime();
xfer = scmi_xfer_get(cinfo->handle, minfo);
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
@@ -221,6 +224,8 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
scmi_dump_header_dbg(dev, &xfer->hdr);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
+ scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
+ xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -392,8 +397,7 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
info->desc->ops->mark_txdone(cinfo, ret);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq,
- xfer->hdr.status);
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
return ret;
}
@@ -789,6 +793,9 @@ static int scmi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (scmi_notification_init(handle))
+ dev_err(dev, "SCMI Notifications NOT available.\n");
+
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
@@ -831,6 +838,8 @@ static int scmi_remove(struct platform_device *pdev)
struct scmi_info *info = platform_get_drvdata(pdev);
struct idr *idr = &info->tx_idr;
+ scmi_notification_exit(&info->handle);
+
mutex_lock(&scmi_list_mutex);
if (info->users)
ret = -EBUSY;
@@ -901,7 +910,7 @@ ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
-#ifdef CONFIG_ARM_PSCI_FW
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
{ /* Sentinel */ },
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
new file mode 100644
index 000000000000..4731daaacd19
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Notification support
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * The SCMI Protocol specification allows the platform to signal events to
+ * interested agents via notification messages: this is an implementation
+ * of the dispatch and delivery of such notifications to the interested users
+ * inside the Linux kernel.
+ *
+ * An SCMI Notification core instance is initialized for each active platform
+ * instance identified by the means of the usual &struct scmi_handle.
+ *
+ * Each SCMI Protocol implementation, during its initialization, registers with
+ * this core its set of supported events using scmi_register_protocol_events():
+ * all the needed descriptors are stored in the &struct registered_protocols and
+ * &struct registered_events arrays.
+ *
+ * Kernel users interested in some specific event can register their callbacks
+ * providing the usual notifier_block descriptor, since this core implements
+ * events' delivery using the standard Kernel notification chains machinery.
+ *
+ * Given the number of possible events defined by SCMI and the extensibility
+ * of the SCMI Protocol itself, the underlying notification chains are created
+ * and destroyed dynamically on demand depending on the number of users
+ * effectively registered for an event, so that no support structures or chains
+ * are allocated until at least one user has registered a notifier_block for
+ * such event. Similarly, events' generation itself is enabled at the platform
+ * level only after at least one user has registered, and it is shutdown after
+ * the last user for that event has gone.
+ *
+ * All user-provided callbacks and allocated notification chains are stored in
+ * the @registered_events_handlers hashtable. Registration requests for events
+ * that are not yet registered are instead kept in the dedicated common
+ * hashtable @pending_events_handlers.
+ *
+ * An event is uniquely identified by the tuple (proto_id, evt_id, src_id)
+ * and is served by its own dedicated notification chain; information contained
+ * in such tuples is used, in a few different ways, to generate the needed
+ * hash-keys.
+ *
+ * Here proto_id and evt_id are simply the protocol_id and message_id numbers
+ * as described in the SCMI Protocol specification, while src_id represents an
+ * optional, protocol dependent, source identifier (like domain_id, perf_id
+ * or sensor_id and so forth).
+ *
+ * Upon reception of a notification message from the platform the SCMI RX ISR
+ * passes the received message payload and some ancillary information (including
+ * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
+ * pushes the event-data itself on a protocol-dedicated kfifo queue for further
+ * deferred processing as specified in @scmi_events_dispatcher().
+ *
+ * Each protocol has its own dedicated work_struct and worker which, once kicked
+ * by the ISR, takes care to empty its own dedicated queue, delivering the
+ * queued items into the proper notification chain: notification processing can
+ * proceed concurrently on distinct workers only between events belonging to
+ * different protocols while delivery of events within the same protocol is
+ * still strictly sequentially ordered by time of arrival.
+ *
+ * Events' information is then extracted from the SCMI Notification messages,
+ * converted into a custom per-event report struct and passed as the void *data
+ * param to the user callback provided by the registered notifier_block, so that
+ * from the user's perspective the callback is invoked as:
+ *
+ * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
+ *
+ */
+
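To ground the callback shape shown above, here is a hedged sketch of a kernel user registering for the Base protocol error event via the notify_ops added by this series; the register_event_notifier() signature and the scmi_base_error_report fields follow this patchset, while the function names and the printed message are illustrative:

    #include <linux/notifier.h>
    #include <linux/scmi_protocol.h>

    static int scmi_base_error_cb(struct notifier_block *nb,
                                  unsigned long event_id, void *data)
    {
            struct scmi_base_error_report *r = data;

            pr_info("SCMI Base error: agent %u fatal %d cmd_count %u\n",
                    r->agent_id, r->fatal, r->cmd_count);

            return NOTIFY_OK;
    }

    static struct notifier_block scmi_base_error_nb = {
            .notifier_call = scmi_base_error_cb,
    };

    /* src_id == NULL asks for notifications from all event sources */
    static int register_base_error_notifier(const struct scmi_handle *handle)
    {
            return handle->notify_ops->register_event_notifier(handle,
                            SCMI_PROTOCOL_BASE, SCMI_EVENT_BASE_ERROR_EVENT,
                            NULL, &scmi_base_error_nb);
    }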
+#define dev_fmt(fmt) "SCMI Notifications - " fmt
+#define pr_fmt(fmt) "SCMI Notifications - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "notify.h"
+
+#define SCMI_MAX_PROTO 256
+
+#define PROTO_ID_MASK GENMASK(31, 24)
+#define EVT_ID_MASK GENMASK(23, 16)
+#define SRC_ID_MASK GENMASK(15, 0)
+
+/*
+ * Builds an unsigned 32bit key from the given input tuple to be used
+ * as a key in hashtables.
+ */
+#define MAKE_HASH_KEY(p, e, s) \
+ (FIELD_PREP(PROTO_ID_MASK, (p)) | \
+ FIELD_PREP(EVT_ID_MASK, (e)) | \
+ FIELD_PREP(SRC_ID_MASK, (s)))
+
+#define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
+
+/*
+ * Assumes that the stored obj includes its own hash-key in a field named 'key':
+ * with this simplification this macro can be equally used for all the objects'
+ * types hashed by this implementation.
+ *
+ * @__ht: The hashtable name
+ * @__obj: A pointer to the object type to be retrieved from the hashtable;
+ * it will be used as a cursor while scanning the hashtable and it will
+ * be possibly left as NULL when @__k is not found
+ * @__k: The key to search for
+ */
+#define KEY_FIND(__ht, __obj, __k) \
+({ \
+ typeof(__k) k_ = __k; \
+ typeof(__obj) obj_; \
+ \
+ hash_for_each_possible((__ht), obj_, hash, k_) \
+ if (obj_->key == k_) \
+ break; \
+ __obj = obj_; \
+})
+
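As a quick worked use of the two macros above (a sketch: the helper name is hypothetical and struct scmi_event_handler is defined further down in this file):

    static struct scmi_event_handler *
    find_pending_handler_sketch(struct scmi_notify_instance *ni)
    {
            /* Base protocol 0x10, event 0x0, source 0 -> key 0x10000000 */
            u32 key = MAKE_HASH_KEY(SCMI_PROTOCOL_BASE, 0x0, 0x0);
            struct scmi_event_handler *hndl;

            /* scans one bucket; hndl is left NULL when the key is absent */
            KEY_FIND(ni->pending_events_handlers, hndl, key);
            return hndl;
    }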
+#define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key))
+#define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key))
+#define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key))
+
+/*
+ * A set of macros used to access safely @registered_protocols and
+ * @registered_events arrays; these are fixed in size and each entry is possibly
+ * populated at protocols' registration time and then only read but NEVER
+ * modified or removed.
+ */
+#define SCMI_GET_PROTO(__ni, __pid) \
+({ \
+ typeof(__ni) ni_ = __ni; \
+ struct scmi_registered_events_desc *__pd = NULL; \
+ \
+ if (ni_) \
+ __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \
+ __pd; \
+})
+
+#define SCMI_GET_REVT_FROM_PD(__pd, __eid) \
+({ \
+ typeof(__pd) pd_ = __pd; \
+ typeof(__eid) eid_ = __eid; \
+ struct scmi_registered_event *__revt = NULL; \
+ \
+ if (pd_ && eid_ < pd_->num_events) \
+ __revt = READ_ONCE(pd_->registered_events[eid_]); \
+ __revt; \
+})
+
+#define SCMI_GET_REVT(__ni, __pid, __eid) \
+({ \
+ struct scmi_registered_event *__revt; \
+ struct scmi_registered_events_desc *__pd; \
+ \
+ __pd = SCMI_GET_PROTO((__ni), (__pid)); \
+ __revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \
+ __revt; \
+})
+
+/* A couple of utility macros to limit cruft when calling protocols' helpers */
+#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->set_notify_enabled(r->proto->ni->handle, \
+ (eid), (sid), (state)); \
+})
+
+#define REVT_NOTIFY_ENABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)
+
+#define REVT_NOTIFY_DISABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)
+
+#define REVT_FILL_REPORT(revt, ...) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->fill_custom_report(r->proto->ni->handle, \
+ __VA_ARGS__); \
+})
+
+#define SCMI_PENDING_HASH_SZ 4
+#define SCMI_REGISTERED_HASH_SZ 6
+
+struct scmi_registered_events_desc;
+
+/**
+ * struct scmi_notify_instance - Represents an instance of the notification
+ * core
+ * @gid: GroupID used for devres
+ * @handle: A reference to the platform instance
+ * @init_work: A work item to perform final initializations of pending handlers
+ * @notify_wq: A reference to the allocated Kernel cmwq
+ * @pending_mtx: A mutex to protect @pending_events_handlers
+ * @registered_protocols: A statically allocated array containing pointers to
+ * all the registered protocol-level specific information
+ * related to events' handling
+ * @pending_events_handlers: A hashtable containing all pending events'
+ * handlers descriptors
+ *
+ * Each platform instance, represented by a handle, has its own instance of
+ * the notification subsystem represented by this structure.
+ */
+struct scmi_notify_instance {
+ void *gid;
+ struct scmi_handle *handle;
+ struct work_struct init_work;
+ struct workqueue_struct *notify_wq;
+ /* lock to protect pending_events_handlers */
+ struct mutex pending_mtx;
+ struct scmi_registered_events_desc **registered_protocols;
+ DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
+};
+
+/**
+ * struct events_queue - Describes a queue and its associated worker
+ * @sz: Size in bytes of the related kfifo
+ * @kfifo: A dedicated Kernel kfifo descriptor
+ * @notify_work: A custom work item bound to this queue
+ * @wq: A reference to the associated workqueue
+ *
+ * Each protocol has its own dedicated events_queue descriptor.
+ */
+struct events_queue {
+ size_t sz;
+ struct kfifo kfifo;
+ struct work_struct notify_work;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct scmi_event_header - A utility header
+ * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
+ * to this event as soon as it entered the SCMI RX ISR
+ * @payld_sz: Effective size of the embedded message payload which follows
+ * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
+ * @payld: A reference to the embedded event payload
+ *
+ * This header is prepended to each received event message payload before
+ * queueing it on the related &struct events_queue.
+ */
+struct scmi_event_header {
+ ktime_t timestamp;
+ size_t payld_sz;
+ unsigned char evt_id;
+ unsigned char payld[];
+};
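+
+/*
+ * For instance, an event carrying a 12-byte payload is queued by
+ * scmi_notify() as two back-to-back kfifo insertions:
+ * sizeof(struct scmi_event_header) bytes of header immediately followed
+ * by 12 bytes of payload.
+ */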
+
+struct scmi_registered_event;
+
+/**
+ * struct scmi_registered_events_desc - Protocol Specific information
+ * @id: Protocol ID
+ * @ops: Protocol specific and event-related operations
+ * @equeue: The embedded per-protocol events_queue
+ * @ni: A reference to the initialized instance descriptor
+ * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
+ * deferred worker when fetching data from the kfifo
+ * @eh_sz: Size of the pre-allocated buffer @eh
+ * @in_flight: A reference to an in flight &struct scmi_registered_event
+ * @num_events: Number of events in @registered_events
+ * @registered_events: A dynamically allocated array holding all the registered
+ * events' descriptors; its size is fixed at protocol
+ * registration time by the number of supported events.
+ * @registered_mtx: A mutex to protect @registered_events_handlers
+ * @registered_events_handlers: A hashtable containing all events' handlers
+ * descriptors registered for this protocol
+ *
+ * All protocols that register at least one event have their protocol-specific
+ * information stored here, together with the embedded allocated events_queue.
+ * These descriptors are stored in the @registered_protocols array at protocol
+ * registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified, since protocols never unregister: as a consequence,
+ * once we safely grab a NON-NULL reference from the array we can keep it and
+ * use it.
+ */
+struct scmi_registered_events_desc {
+ u8 id;
+ const struct scmi_event_ops *ops;
+ struct events_queue equeue;
+ struct scmi_notify_instance *ni;
+ struct scmi_event_header *eh;
+ size_t eh_sz;
+ void *in_flight;
+ int num_events;
+ struct scmi_registered_event **registered_events;
+ /* mutex to protect registered_events_handlers */
+ struct mutex registered_mtx;
+ DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
+};
+
+/**
+ * struct scmi_registered_event - Event Specific Information
+ * @proto: A reference to the associated protocol descriptor
+ * @evt: A reference to the associated event descriptor (as provided at
+ * registration time)
+ * @report: A pre-allocated buffer used by the deferred worker to fill a
+ * customized event report
+ * @num_sources: The number of possible sources for this event as stated at
+ * events' registration time
+ * @sources: A reference to a dynamically allocated array used to refcount the
+ * events' enable requests for all the existing sources
+ * @sources_mtx: A mutex to serialize the access to @sources
+ *
+ * All registered events are represented by one of these structures that are
+ * stored in the @registered_events array at protocol registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified, since protocols never unregister: as a consequence,
+ * once we safely grab a NON-NULL reference from the table we can keep it and
+ * use it.
+ */
+struct scmi_registered_event {
+ struct scmi_registered_events_desc *proto;
+ const struct scmi_event *evt;
+ void *report;
+ u32 num_sources;
+ refcount_t *sources;
+ /* locking to serialize the access to sources */
+ struct mutex sources_mtx;
+};
+
+/**
+ * struct scmi_event_handler - Event handler information
+ * @key: The used hashkey
+ * @users: A reference count for number of active users for this handler
+ * @r_evt: A reference to the associated registered event; when this is NULL
+ * this handler is pending, meaning that it identifies a set of
+ * callbacks intended to be attached to an event which is still not
+ * known to, nor registered by, any protocol at that point in time
+ * @chain: The notification chain dedicated to this specific event tuple
+ * @hash: The hlist_node used for collision handling
+ * @enabled: A boolean which records if event's generation has been already
+ * enabled for this handler as a whole
+ *
+ * This structure collects all the information needed to process a received
+ * event identified by the tuple (proto_id, evt_id, src_id).
+ * These descriptors are stored in a per-protocol @registered_events_handlers
+ * table using as a key a value derived from that tuple.
+ */
+struct scmi_event_handler {
+ u32 key;
+ refcount_t users;
+ struct scmi_registered_event *r_evt;
+ struct blocking_notifier_head chain;
+ struct hlist_node hash;
+ bool enabled;
+};
+
+#define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt)
+
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+
+/**
+ * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The key to use to lookup the related notification chain
+ * @report: The customized event-specific report to pass down to the callbacks
+ * as their *data parameter.
+ */
+static inline void
+scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
+ u32 evt_key, void *report)
+{
+ int ret;
+ struct scmi_event_handler *hndl;
+
+ /*
+ * Here ensure the event handler cannot vanish while using it.
+ * It is legitimate, though, for a handler not to be found at all here,
+ * e.g. when it has been unregistered by the user after some events had
+ * already been queued.
+ */
+ hndl = scmi_get_active_handler(ni, evt_key);
+ if (!hndl)
+ return;
+
+ ret = blocking_notifier_call_chain(&hndl->chain,
+ KEY_XTRACT_EVT_ID(evt_key),
+ report);
+ /* Notifiers are NOT supposed to cut the chain ... */
+ WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
+
+ scmi_put_active_handler(ni, hndl);
+}
+
+/**
+ * scmi_process_event_header() - Dequeue and process an event header
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ *
+ * Read an event header from the protocol queue into the dedicated scratch
+ * buffer and look for a matching registered event; if an anomalously sized
+ * read is detected, just flush the queue.
+ *
+ * Return:
+ * * a reference to the matching registered event when found
+ * * ERR_PTR(-EINVAL) when NO registered event could be found
+ * * NULL when the queue is empty
+ */
+static inline struct scmi_registered_event *
+scmi_process_event_header(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd)
+{
+ unsigned int outs;
+ struct scmi_registered_event *r_evt;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh,
+ sizeof(struct scmi_event_header));
+ if (!outs)
+ return NULL;
+ if (outs != sizeof(struct scmi_event_header)) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return NULL;
+ }
+
+ r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
+ if (!r_evt)
+ r_evt = ERR_PTR(-EINVAL);
+
+ return r_evt;
+}
+
+/**
+ * scmi_process_event_payload() - Dequeue and process an event payload
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ * @r_evt: The registered event descriptor to use
+ *
+ * Read an event payload from the protocol queue into the dedicated scratch
+ * buffer, fill a custom report and then look for matching event handlers and
+ * call them; skip any unknown event (as marked by scmi_process_event_header())
+ * and, if an anomalously sized read is detected, just flush the queue.
+ *
+ * Return: False when the queue is empty
+ */
+static inline bool
+scmi_process_event_payload(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd,
+ struct scmi_registered_event *r_evt)
+{
+ u32 src_id, key;
+ unsigned int outs;
+ void *report = NULL;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
+ if (!outs)
+ return false;
+
+ /* Any in-flight event has now been officially processed */
+ pd->in_flight = NULL;
+
+ if (outs != pd->eh->payld_sz) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return false;
+ }
+
+ if (IS_ERR(r_evt)) {
+ dev_warn(pd->ni->handle->dev,
+ "SKIP UNKNOWN EVT - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
+ pd->eh->payld, pd->eh->payld_sz,
+ r_evt->report, &src_id);
+ if (!report) {
+ dev_err(pd->ni->handle->dev,
+ "report not available - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ /* At first search for a generic ALL src_ids handler... */
+ key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ /* ...then search for any specific src_id */
+ key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ return true;
+}
+
+/**
+ * scmi_events_dispatcher() - Common worker logic for all work items.
+ * @work: The work item to use, which is associated to a dedicated events_queue
+ *
+ * Logic:
+ * 1. dequeue one pending RX notification (queued in SCMI RX ISR context)
+ * 2. generate a custom event report from the received event message
+ * 3. look up any registered ALL_SRC_IDs handler:
+ * -> call the related notification chain passing in the report
+ * 4. look up any registered specific SRC_ID handler:
+ * -> call the related notification chain passing in the report
+ *
+ * Note that:
+ * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
+ * flood of events cannot saturate other protocols' queues.
+ * * each per-protocol queue is associated to a distinct work_item, which
+ * means, in turn, that:
+ * + all protocols can process their dedicated queues concurrently
+ * (since notify_wq:max_active != 1)
+ * + at most one worker instance is allowed to run on the same queue
+ * concurrently: this ensures that we can have only one concurrent
+ * reader/writer on the associated kfifo, so that we can use it lock-less
+ *
+ * Context: Process context.
+ */
+static void scmi_events_dispatcher(struct work_struct *work)
+{
+ struct events_queue *eq;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_registered_event *r_evt;
+
+ eq = container_of(work, struct events_queue, notify_work);
+ pd = container_of(eq, struct scmi_registered_events_desc, equeue);
+ /*
+ * In order to keep the queue lock-less and the number of memcopies
+ * to the bare minimum needed, the dispatcher accounts for the
+ * possibility of per-protocol in-flight events: i.e. an event whose
+ * reception could end up being split across two subsequent runs of this
+ * worker, first the header, then the payload; @in_flight carries the
+ * matched registered event across those two runs.
+ */
+ do {
+ if (!pd->in_flight) {
+ r_evt = scmi_process_event_header(eq, pd);
+ if (!r_evt)
+ break;
+ pd->in_flight = r_evt;
+ } else {
+ r_evt = pd->in_flight;
+ }
+ } while (scmi_process_event_payload(eq, pd, r_evt));
+}
+
+/**
+ * scmi_notify() - Queues a notification for further deferred processing
+ * @handle: The handle identifying the platform instance from which the
+ * dispatched event is generated
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID (msgID)
+ * @buf: Event Message Payload (without the header)
+ * @len: Event Message Payload size
+ * @ts: RX Timestamp in nanoseconds (boottime)
+ *
+ * Context: Called in interrupt context to queue a received event for
+ * deferred processing.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_header eh;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return 0;
+ ni = handle->notify_priv;
+
+ r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
+ if (!r_evt)
+ return -EINVAL;
+
+ if (len > r_evt->evt->max_payld_sz) {
+ dev_err(handle->dev, "discard badly sized message\n");
+ return -EINVAL;
+ }
+ if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
+ dev_warn(handle->dev,
+ "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
+ proto_id, evt_id, ktime_to_ns(ts));
+ return -ENOMEM;
+ }
+
+ eh.timestamp = ts;
+ eh.evt_id = evt_id;
+ eh.payld_sz = len;
+ /*
+ * Header and payload are enqueued with two distinct kfifo_in() (so non
+ * atomic), but this situation is handled properly on the consumer side
+ * with in-flight events tracking.
+ */
+ kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
+ kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
+ /*
+ * We don't care about the return value here since we just want to
+ * ensure that a work item is queued whenever some items have been
+ * pushed onto the kfifo:
+ * - if work was already queued it will simply fail to queue a new one
+ * since it is not needed
+ * - if work was not already queued it will be now, even if the work
+ * was in fact already running: this behavior avoids any possible race
+ * when this function pushes new items onto the kfifos after the
+ * related executing worker had already determined the kfifo to be
+ * empty and was terminating.
+ */
+ queue_work(r_evt->proto->equeue.wq,
+ &r_evt->proto->equeue.notify_work);
+
+ return 0;
+}
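+
+/*
+ * A sketch of the expected transport-side usage, with proto_id/evt_id and
+ * the payload taken from the received message (illustrative only):
+ *
+ *	ts = ktime_get_boottime();
+ *	...
+ *	scmi_notify(handle, proto_id, evt_id, payld, payld_sz, ts);
+ */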
+
+/**
+ * scmi_kfifo_free() - Devres action helper to free the kfifo
+ * @kfifo: The kfifo to free
+ */
+static void scmi_kfifo_free(void *kfifo)
+{
+ kfifo_free((struct kfifo *)kfifo);
+}
+
+/**
+ * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
+ * @ni: A reference to the notification instance to use
+ * @equeue: The events_queue to initialize
+ * @sz: Size of the kfifo buffer to allocate
+ *
+ * Allocate a buffer for the kfifo and initialize it.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
+ struct events_queue *equeue, size_t sz)
+{
+ int ret;
+
+ if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
+ return -ENOMEM;
+ /* Size could have been rounded up to a power of two */
+ equeue->sz = kfifo_size(&equeue->kfifo);
+
+ ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
+ &equeue->kfifo);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
+ equeue->wq = ni->notify_wq;
+
+ return ret;
+}
+
+/**
+ * scmi_allocate_registered_events_desc() - Allocate a registered events'
+ * descriptor
+ * @ni: A reference to the &struct scmi_notify_instance notification instance
+ * to use
+ * @proto_id: Protocol ID
+ * @queue_sz: Size of the associated queue to allocate
+ * @eh_sz: Size of the event header scratch area to pre-allocate
+ * @num_events: Number of events to support (size of @registered_events)
+ * @ops: Pointer to a struct holding references to protocol specific helpers
+ * needed during events handling
+ *
+ * It is supposed to be called only once for each protocol at protocol
+ * initialization time, so it warns if the requested protocol is found already
+ * registered.
+ *
+ * Return: The allocated and registered descriptor on Success
+ */
+static struct scmi_registered_events_desc *
+scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
+ u8 proto_id, size_t queue_sz, size_t eh_sz,
+ int num_events,
+ const struct scmi_event_ops *ops)
+{
+ int ret;
+ struct scmi_registered_events_desc *pd;
+
+ /* Ensure protocols are up to date */
+ smp_rmb();
+ if (WARN_ON(ni->registered_protocols[proto_id]))
+ return ERR_PTR(-EINVAL);
+
+ pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+ pd->id = proto_id;
+ pd->ops = ops;
+ pd->ni = ni;
+
+ ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
+ if (!pd->eh)
+ return ERR_PTR(-ENOMEM);
+ pd->eh_sz = eh_sz;
+
+ pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
+ sizeof(char *), GFP_KERNEL);
+ if (!pd->registered_events)
+ return ERR_PTR(-ENOMEM);
+ pd->num_events = num_events;
+
+ /* Initialize per protocol handlers table */
+ mutex_init(&pd->registered_mtx);
+ hash_init(pd->registered_events_handlers);
+
+ return pd;
+}
+
+/**
+ * scmi_register_protocol_events() - Register Protocol Events with the core
+ * @handle: The handle identifying the platform instance against which the
+ * protocol's events are registered
+ * @proto_id: Protocol ID
+ * @queue_sz: Size in bytes of the associated queue to be allocated
+ * @ops: Protocol specific event-related operations
+ * @evt: Event descriptor array
+ * @num_events: Number of events in @evt array
+ * @num_sources: Number of possible sources for this protocol on this
+ * platform.
+ *
+ * Used by SCMI Protocols initialization code to register with the notification
+ * core the list of supported events and their descriptors: takes care to
+ * pre-allocate and store all needed descriptors, scratch buffers and event
+ * queues.
+ *
+ * Return: 0 on Success
+ */
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id, size_t queue_sz,
+ const struct scmi_event_ops *ops,
+ const struct scmi_event *evt, int num_events,
+ int num_sources)
+{
+ int i;
+ size_t payld_sz = 0;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_notify_instance *ni;
+
+ if (!ops || !evt)
+ return -EINVAL;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENOMEM;
+ ni = handle->notify_priv;
+
+ /* Attach to the notification main devres group */
+ if (!devres_open_group(ni->handle->dev, ni->gid, GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = 0; i < num_events; i++)
+ payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
+ payld_sz += sizeof(struct scmi_event_header);
+
+ pd = scmi_allocate_registered_events_desc(ni, proto_id, queue_sz,
+ payld_sz, num_events, ops);
+ if (IS_ERR(pd))
+ goto err;
+
+ for (i = 0; i < num_events; i++, evt++) {
+ struct scmi_registered_event *r_evt;
+
+ r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
+ GFP_KERNEL);
+ if (!r_evt)
+ goto err;
+ r_evt->proto = pd;
+ r_evt->evt = evt;
+
+ r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
+ sizeof(refcount_t), GFP_KERNEL);
+ if (!r_evt->sources)
+ goto err;
+ r_evt->num_sources = num_sources;
+ mutex_init(&r_evt->sources_mtx);
+
+ r_evt->report = devm_kzalloc(ni->handle->dev,
+ evt->max_report_sz, GFP_KERNEL);
+ if (!r_evt->report)
+ goto err;
+
+ pd->registered_events[i] = r_evt;
+ /* Ensure events are updated */
+ smp_wmb();
+ dev_dbg(handle->dev, "registered event - %lX\n",
+ MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
+ }
+
+ /* Register protocol and events...it will never be removed */
+ ni->registered_protocols[proto_id] = pd;
+ /* Ensure protocols are updated */
+ smp_wmb();
+
+ devres_close_group(ni->handle->dev, ni->gid);
+
+ /*
+ * Finalize any pending events' handler which could have been waiting
+ * for this protocol's events registration.
+ */
+ schedule_work(&ni->init_work);
+
+ return 0;
+
+err:
+ dev_warn(handle->dev, "Proto:%X - Registration Failed !\n", proto_id);
+ /* A failing protocol registration does not trigger full failure */
+ devres_close_group(ni->handle->dev, ni->gid);
+
+ return -ENOMEM;
+}
+
+/**
+ * scmi_allocate_event_handler() - Allocate Event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: 32-bit key uniquely bound to the event identified by the tuple
+ * (proto_id, evt_id, src_id)
+ *
+ * Allocate an event handler and related notification chain associated with
+ * the provided event handler key.
+ * Note that, at this point, a related registered_event is still to be
+ * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
+ * is initialized as pending.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ * Return: the freshly allocated structure on Success
+ */
+static struct scmi_event_handler *
+scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_event_handler *hndl;
+
+ hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
+ if (!hndl)
+ return NULL;
+ hndl->key = evt_key;
+ BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
+ refcount_set(&hndl->users, 1);
+ /* New handlers are created pending */
+ hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
+
+ return hndl;
+}
+
+/**
+ * scmi_free_event_handler() - Free the provided Event handler
+ * @hndl: The event handler structure to free
+ *
+ * Context: Assumes to be called with proper locking acquired depending
+ * on the situation.
+ */
+static void scmi_free_event_handler(struct scmi_event_handler *hndl)
+{
+ hash_del(&hndl->hash);
+ kfree(hndl);
+}
+
+/**
+ * scmi_bind_event_handler() - Helper to attempt binding a handler to an event
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to bind
+ *
+ * If an associated registered event is found, move the handler from the
+ * pending table into the registered one.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
+ KEY_XTRACT_EVT_ID(hndl->key));
+ if (!r_evt)
+ return -EINVAL;
+
+ /* Remove from pending and insert into registered */
+ hash_del(&hndl->hash);
+ hndl->r_evt = r_evt;
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hash_add(r_evt->proto->registered_events_handlers,
+ &hndl->hash, hndl->key);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+
+ return 0;
+}
+
+/**
+ * scmi_valid_pending_handler() - Helper to check pending status of handlers
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to check
+ *
+ * A handler is considered pending when its r_evt == NULL, because the related
+ * event was still unknown at handler's registration time; anyway, since all
+ * protocols register their supported events once and for all at protocols'
+ * initialization time, a pending handler cannot be considered valid anymore if
+ * the underlying event which it is waiting for belongs to an already
+ * initialized and registered protocol.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_events_desc *pd;
+
+ if (!IS_HNDL_PENDING(hndl))
+ return -EINVAL;
+
+ pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
+ if (pd)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * scmi_register_event_handler() - Register whenever possible an Event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to register
+ *
+ * At first try to bind an event handler to its associated event, then check if
+ * it was at least a valid pending handler: if it was neither bound nor valid,
+ * return an error.
+ *
+ * Valid pending incomplete bindings will be periodically retried by a dedicated
+ * worker which is kicked each time a new protocol completes its own
+ * registration phase.
+ *
+ * Context: Assumes to be called with @pending_mtx acquired.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
+ hndl->key);
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ if (!ret)
+ dev_dbg(ni->handle->dev,
+ "registered PENDING handler - key:%X\n",
+ hndl->key);
+ }
+
+ return ret;
+}
+
+/**
+ * __scmi_event_handler_get_ops() - Utility to get or create an event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ * @create: A boolean flag to specify if a handler must be created when
+ * not already existent
+ *
+ * Search for the desired handler matching the key in both the per-protocol
+ * registered table and the common pending table:
+ * * if found, adjust the users refcount
+ * * if not found and @create is true, create and register the new handler:
+ * the handler could end up being registered as pending if no matching
+ * event could be found.
+ *
+ * A handler is guaranteed to reside in one and only one of the tables at
+ * any one time; to ensure this the whole search and create is performed
+ * holding the @pending_mtx lock, with @registered_mtx additionally acquired
+ * if needed.
+ *
+ * Note that when a nested acquisition of these mutexes is needed the locking
+ * order is always (same as in @init_work):
+ * 1. pending_mtx
+ * 2. registered_mtx
+ *
+ * Events generation is NOT enabled right after creation within this routine
+ * since at creation time we usually want to have everything set up and ready
+ * before events really start flowing.
+ *
+ * Return: A properly refcounted handler on Success, NULL on Failure
+ */
+static inline struct scmi_event_handler *
+__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
+ u32 evt_key, bool create)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+
+ mutex_lock(&ni->pending_mtx);
+ /* Search registered events at first ... if possible at all */
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ /* ...then amongst pending. */
+ if (!hndl) {
+ hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ }
+
+ /* Create if still not found and required */
+ if (!hndl && create) {
+ hndl = scmi_allocate_event_handler(ni, evt_key);
+ if (hndl && scmi_register_event_handler(ni, hndl)) {
+ dev_dbg(ni->handle->dev,
+ "purging UNKNOWN handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ hndl = NULL;
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+
+ return hndl;
+}
+
+static struct scmi_event_handler *
+scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, false);
+}
+
+static struct scmi_event_handler *
+scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, true);
+}
+
+/**
+ * scmi_get_active_handler() - Helper to get active handlers only
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ *
+ * Search for the desired handler matching the key only in the per-protocol
+ * table of registered handlers: this is called only from the dispatching path,
+ * so we want to be as quick as possible and do not care about pending handlers.
+ *
+ * Return: A properly refcounted active handler
+ */
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ return hndl;
+}
+
+/**
+ * __scmi_enable_evt() - Enable/disable events generation
+ * @r_evt: The registered event to act upon
+ * @src_id: The src_id to act upon
+ * @enable: The action to perform: true->Enable, false->Disable
+ *
+ * Takes care of proper refcounting while performing enable/disable: handles
+ * the special case of ALL sources requests by itself.
+ * Returns successfully if at least one of the required src_ids has been
+ * successfully enabled/disabled.
+ *
+ * Return: 0 on Success
+ */
+static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
+ u32 src_id, bool enable)
+{
+ int retvals = 0;
+ u32 num_sources;
+ refcount_t *sid;
+
+ if (src_id == SRC_ID_MASK) {
+ src_id = 0;
+ num_sources = r_evt->num_sources;
+ } else if (src_id < r_evt->num_sources) {
+ num_sources = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&r_evt->sources_mtx);
+ if (enable) {
+ for (; num_sources; src_id++, num_sources--) {
+ int ret = 0;
+
+ sid = &r_evt->sources[src_id];
+ if (refcount_read(sid) == 0) {
+ ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
+ src_id);
+ if (!ret)
+ refcount_set(sid, 1);
+ } else {
+ refcount_inc(sid);
+ }
+ retvals += !ret;
+ }
+ } else {
+ for (; num_sources; src_id++, num_sources--) {
+ sid = &r_evt->sources[src_id];
+ if (refcount_dec_and_test(sid))
+ REVT_NOTIFY_DISABLE(r_evt,
+ r_evt->evt->id, src_id);
+ }
+ retvals = 1;
+ }
+ mutex_unlock(&r_evt->sources_mtx);
+
+ return retvals ? 0 : -EINVAL;
+}
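+
+/*
+ * A worked example of the __scmi_enable_evt() refcounting above, for an
+ * arbitrary src_id 3:
+ *
+ *	__scmi_enable_evt(r_evt, 3, true);   platform cmd, sources[3] == 1
+ *	__scmi_enable_evt(r_evt, 3, true);   no cmd,       sources[3] == 2
+ *	__scmi_enable_evt(r_evt, 3, false);  no cmd,       sources[3] == 1
+ *	__scmi_enable_evt(r_evt, 3, false);  platform cmd, sources[3] == 0
+ */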
+
+static int scmi_enable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (!hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), true);
+ if (!ret)
+ hndl->enabled = true;
+ }
+
+ return ret;
+}
+
+static int scmi_disable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), false);
+ if (!ret)
+ hndl->enabled = false;
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_put_handler_unlocked() - Put an event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to act upon
+ *
+ * After having got exclusive access to the registered handlers hashtable,
+ * update the refcount and, if @hndl is no longer in use by anyone:
+ * * ask for events' generation disabling
+ * * unregister and free the handler itself
+ *
+ * Context: Assumes all the proper locking has been managed by the caller.
+ */
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ if (refcount_dec_and_test(&hndl->users)) {
+ if (!IS_HNDL_PENDING(hndl))
+ scmi_disable_events(hndl);
+ scmi_free_event_handler(hndl);
+ }
+}
+
+static void scmi_put_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+
+ mutex_lock(&ni->pending_mtx);
+ if (r_evt)
+ mutex_lock(&r_evt->proto->registered_mtx);
+
+ scmi_put_handler_unlocked(ni, hndl);
+
+ if (r_evt)
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ mutex_unlock(&ni->pending_mtx);
+}
+
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+
+ mutex_lock(&r_evt->proto->registered_mtx);
+ scmi_put_handler_unlocked(ni, hndl);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+}
+
+/**
+ * scmi_event_handler_enable_events() - Enable events associated to a handler
+ * @hndl: The Event handler to act upon
+ *
+ * Return: 0 on Success
+ */
+static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
+{
+ if (scmi_enable_events(hndl)) {
+ pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * scmi_register_notifier() - Register a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is registered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic helper to register a notifier_block against a protocol event.
+ *
+ * A notifier_block @nb will be registered for each distinct event identified
+ * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
+ * so that:
+ *
+ * (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
+ *
+ * @src_id meaning is protocol specific and identifies the origin of the event
+ * (like domain_id, sensor_id and so forth).
+ *
+ * @src_id can be NULL to signify that the caller is interested in receiving
+ * notifications from ALL the available sources for that protocol OR simply that
+ * the protocol does not support distinct sources.
+ *
+ * As soon as one user for the specified tuple appears, a handler is created,
+ * and that specific event's generation is enabled at the platform level,
+ * unless no associated registered event is found, meaning that the needed
+ * protocol is still to be initialized, in which case the handler is just
+ * registered as pending.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_notifier(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret = 0;
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENODEV;
+ ni = handle->notify_priv;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_or_create_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ blocking_notifier_chain_register(&hndl->chain, nb);
+
+ /* Enable events for not pending handlers */
+ if (!IS_HNDL_PENDING(hndl)) {
+ ret = scmi_event_handler_enable_events(hndl);
+ if (ret)
+ scmi_put_handler(ni, hndl);
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_unregister_notifier() - Unregister a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is unregistered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID
+ * @nb: The notifier_block to unregister
+ *
+ * Takes care to unregister the provided @nb from the notification chain
+ * associated to the specified event and, if there are no more users for the
+ * event handler, also frees the associated event handler structures
+ * (this could possibly cause disabling of event's generation at platform
+ * level).
+ *
+ * Return: 0 on Success
+ */
+static int scmi_unregister_notifier(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb)
+{
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENODEV;
+ ni = handle->notify_priv;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ /*
+ * Note that this chain unregistration call is safe on its own
+ * being internally protected by an rwsem.
+ */
+ blocking_notifier_chain_unregister(&hndl->chain, nb);
+ scmi_put_handler(ni, hndl);
+
+ /*
+ * This balances the initial get issued in @scmi_register_notifier.
+ * If this notifier_block happened to be the last known user callback
+ * for this event, the handler is here freed and the event's generation
+ * stopped.
+ *
+ * Note that an ongoing concurrent lookup on the delivery workqueue
+ * path could still hold the refcount at 1 even after this routine
+ * completes: in such a case it will be the final put on the delivery
+ * path which will finally free this unused handler.
+ */
+ scmi_put_handler(ni, hndl);
+
+ return 0;
+}
+
+/**
+ * scmi_protocols_late_init() - Worker for late initialization
+ * @work: The work item to use associated to the proper SCMI instance
+ *
+ * This kicks in whenever a new protocol has completed its own registration via
+ * scmi_register_protocol_events(): it is in charge of scanning the table of
+ * pending handlers (registered by users while the related protocol was still
+ * not initialized) and finalizing their initialization whenever possible;
+ * invalid pending handlers are purged at this point in time.
+ */
+static void scmi_protocols_late_init(struct work_struct *work)
+{
+ int bkt;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+ struct hlist_node *tmp;
+
+ ni = container_of(work, struct scmi_notify_instance, init_work);
+
+ /* Ensure protocols and events are up to date */
+ smp_rmb();
+
+ mutex_lock(&ni->pending_mtx);
+ hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev,
+ "finalized PENDING handler - key:%X\n",
+ hndl->key);
+ ret = scmi_event_handler_enable_events(hndl);
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ }
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging PENDING handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+}
+
+/*
+ * notify_ops are attached to the handle so that they can be accessed
+ * directly from an scmi_driver to register its own notifiers.
+ */
+static struct scmi_notify_ops notify_ops = {
+ .register_event_notifier = scmi_register_notifier,
+ .unregister_event_notifier = scmi_unregister_notifier,
+};
+
+/**
+ * scmi_notification_init() - Initializes Notification Core Support
+ * @handle: The handle identifying the platform instance to initialize
+ *
+ * This function lays out all the basic resources needed by the notification
+ * core instance identified by the provided handle: once done, all of the
+ * SCMI Protocols can register their events with the core during their own
+ * initializations.
+ *
+ * Note that failing to initialize the core notifications support does not
+ * cause the whole SCMI Protocols stack to fail its initialization.
+ *
+ * SCMI Notification Initialization happens in 2 steps:
+ * * initialization: basic common allocations (this function)
+ * * registration: protocols asynchronously come into life and registers their
+ * own supported list of events with the core; this causes
+ * further per-protocol allocations
+ *
+ * Any user's callback registration attempt referring to a still unregistered
+ * event will be recorded as pending and finalized later (if possible) by the
+ * scmi_protocols_late_init() work.
+ * This allows for lazy initialization of SCMI Protocols due to late (or
+ * missing) SCMI drivers' modules loading.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notification_init(struct scmi_handle *handle)
+{
+ void *gid;
+ struct scmi_notify_instance *ni;
+
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
+ if (!ni)
+ goto err;
+
+ ni->gid = gid;
+ ni->handle = handle;
+
+ ni->notify_wq = alloc_workqueue("scmi_notify",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!ni->notify_wq)
+ goto err;
+
+ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+ sizeof(char *), GFP_KERNEL);
+ if (!ni->registered_protocols)
+ goto err;
+
+ mutex_init(&ni->pending_mtx);
+ hash_init(ni->pending_events_handlers);
+
+ INIT_WORK(&ni->init_work, scmi_protocols_late_init);
+
+ handle->notify_ops = &notify_ops;
+ handle->notify_priv = ni;
+ /* Ensure handle is up to date */
+ smp_wmb();
+
+ dev_info(handle->dev, "Core Enabled.\n");
+
+ devres_close_group(handle->dev, ni->gid);
+
+ return 0;
+
+err:
+ dev_warn(handle->dev, "Initialization Failed.\n");
+ devres_release_group(handle->dev, NULL);
+ return -ENOMEM;
+}
+
+/**
+ * scmi_notification_exit() - Shutdown and clean Notification core
+ * @handle: The handle identifying the platform instance to shutdown
+ */
+void scmi_notification_exit(struct scmi_handle *handle)
+{
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return;
+ ni = handle->notify_priv;
+
+ handle->notify_priv = NULL;
+ /* Ensure handle is up to date */
+ smp_wmb();
+
+ /* Destroy while letting pending work complete */
+ destroy_workqueue(ni->notify_wq);
+
+ devres_release_group(ni->handle->dev, ni->gid);
+}
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
new file mode 100644
index 000000000000..3485f20fa70e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * notification header file containing some definitions, structures
+ * and function prototypes related to SCMI Notification handling.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef _SCMI_NOTIFY_H
+#define _SCMI_NOTIFY_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define SCMI_PROTO_QUEUE_SZ 4096
+
+/**
+ * struct scmi_event - Describes an event to be supported
+ * @id: Event ID
+ * @max_payld_sz: Max possible size for the payload of a notification message
+ * @max_report_sz: Max possible size for the report of a notification message
+ *
+ * Each SCMI protocol, during its initialization phase, can describe the
+ * events it wishes to support with an array of struct scmi_event entries
+ * and pass them to the core using scmi_register_protocol_events().
+ */
+struct scmi_event {
+ u8 id;
+ size_t max_payld_sz;
+ size_t max_report_sz;
+};
+
+/**
+ * struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
+ * using the proper custom protocol commands.
+ * Return 0 on Success
+ * @fill_custom_report: Fills a custom event report from the provided
+ * event message payload, identifying the event
+ * specific src_id.
+ * Return NULL on failure, otherwise the fully
+ * populated @report
+ *
+ * Context: Helpers described in &struct scmi_event_ops are called only in
+ * process context.
+ */
+struct scmi_event_ops {
+ int (*set_notify_enabled)(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enabled);
+ void *(*fill_custom_report)(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id);
+};
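+
+/*
+ * A sketch of the expected protocol-side wiring (the my_* names are
+ * illustrative only):
+ *
+ *	static const struct scmi_event my_events[] = {
+ *		{
+ *			.id = MY_EVENT_ID,
+ *			.max_payld_sz = sizeof(struct my_notify_payld),
+ *			.max_report_sz = sizeof(struct my_report),
+ *		},
+ *	};
+ *
+ *	static const struct scmi_event_ops my_event_ops = {
+ *		.set_notify_enabled = my_set_notify_enabled,
+ *		.fill_custom_report = my_fill_custom_report,
+ *	};
+ *
+ * to be finally passed to scmi_register_protocol_events() from the
+ * protocol's own init routine, together with the number of possible
+ * sources.
+ */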
+
+int scmi_notification_init(struct scmi_handle *handle);
+void scmi_notification_exit(struct scmi_handle *handle);
+
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id, size_t queue_sz,
+ const struct scmi_event_ops *ops,
+ const struct scmi_event *evt, int num_events,
+ int num_sources);
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts);
+
+#endif /* _SCMI_NOTIFY_H */
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index eadc171e254b..3e1e87012c95 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -5,15 +5,19 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
+
#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/scmi_protocol.h>
#include <linux/sort.h>
#include "common.h"
+#include "notify.h"
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
@@ -27,11 +31,6 @@ enum scmi_performance_protocol_cmd {
PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
-enum scmi_performance_protocol_notify {
- PERFORMANCE_LIMITS_CHANGED = 0x0,
- PERFORMANCE_LEVEL_CHANGED = 0x1,
-};
-
struct scmi_opp {
u32 perf;
u32 power;
@@ -86,6 +85,19 @@ struct scmi_perf_notify_level_or_limits {
__le32 notify_enable;
};
+struct scmi_perf_limits_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 range_max;
+ __le32 range_min;
+};
+
+struct scmi_perf_level_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 performance_level;
+};
+
struct scmi_msg_resp_perf_describe_levels {
__le16 num_returned;
__le16 num_remaining;
@@ -158,6 +170,11 @@ struct scmi_perf_info {
struct perf_dom_info *dom_info;
};
+static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
+ PERF_NOTIFY_LIMITS,
+ PERF_NOTIFY_LEVEL,
+};
+
static int scmi_perf_attributes_get(const struct scmi_handle *handle,
struct scmi_perf_info *pi)
{
@@ -488,6 +505,29 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
return scmi_perf_mb_level_get(handle, domain, level, poll);
}
+static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
+ u32 domain, int message_id,
+ bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_notify_level_or_limits *notify;
+
+ ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
{
if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
@@ -697,6 +737,17 @@ static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
+static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
+ struct device *dev)
+{
+ struct perf_dom_info *dom;
+ struct scmi_perf_info *pi = handle->perf_priv;
+
+ dom = pi->dom_info + scmi_dev_domain_id(dev);
+
+ return dom->fc_info && dom->fc_info->level_set_addr;
+}
+
static struct scmi_perf_ops perf_ops = {
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
@@ -708,6 +759,90 @@ static struct scmi_perf_ops perf_ops = {
.freq_set = scmi_dvfs_freq_set,
.freq_get = scmi_dvfs_freq_get,
.est_power_get = scmi_dvfs_est_power_get,
+ .fast_switch_possible = scmi_fast_switch_possible,
+};
+
+static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
+ {
+ const struct scmi_perf_limits_notify_payld *p = payld;
+ struct scmi_perf_limits_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->range_max = le32_to_cpu(p->range_max);
+ r->range_min = le32_to_cpu(p->range_min);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
+ {
+ const struct scmi_perf_level_notify_payld *p = payld;
+ struct scmi_perf_level_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->performance_level = le32_to_cpu(p->performance_level);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static const struct scmi_event perf_events[] = {
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_limits_report),
+ },
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_level_report),
+ },
+};
+
+static const struct scmi_event_ops perf_event_ops = {
+ .set_notify_enabled = scmi_perf_set_notify_enabled,
+ .fill_custom_report = scmi_perf_fill_custom_report,
};
static int scmi_perf_protocol_init(struct scmi_handle *handle)
@@ -742,6 +877,12 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
}
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ,
+ &perf_event_ops, perf_events,
+ ARRAY_SIZE(perf_events),
+ pinfo->num_domains);
+
pinfo->version = version;
handle->perf_ops = &perf_ops;
handle->perf_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index cf7f0312381b..46f213644c49 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -5,19 +5,18 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
enum scmi_power_protocol_cmd {
POWER_DOMAIN_ATTRIBUTES = 0x3,
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
- POWER_STATE_CHANGE_REQUESTED_NOTIFY = 0x7,
-};
-
-enum scmi_power_protocol_notify {
- POWER_STATE_CHANGED = 0x0,
- POWER_STATE_CHANGE_REQUESTED = 0x1,
};
struct scmi_msg_resp_power_attributes {
@@ -48,6 +47,12 @@ struct scmi_power_state_notify {
__le32 notify_enable;
};
+struct scmi_power_state_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_state;
+};
+
struct power_dom_info {
bool state_set_sync;
bool state_set_async;
@@ -186,6 +191,75 @@ static struct scmi_power_ops power_ops = {
.state_get = scmi_power_state_get,
};
+static int scmi_power_request_notify(const struct scmi_handle *handle,
+ u32 domain, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_power_state_notify *notify;
+
+ ret = scmi_xfer_get_init(handle, POWER_STATE_NOTIFY,
+ SCMI_PROTOCOL_POWER, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_power_request_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_power_state_notify_payld *p = payld;
+ struct scmi_power_state_changed_report *r = report;
+
+ if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_state = le32_to_cpu(p->power_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static const struct scmi_event power_events[] = {
+ {
+ .id = SCMI_EVENT_POWER_STATE_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_power_state_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_power_state_changed_report),
+ },
+};
+
+static const struct scmi_event_ops power_event_ops = {
+ .set_notify_enabled = scmi_power_set_notify_enabled,
+ .fill_custom_report = scmi_power_fill_custom_report,
+};
+
static int scmi_power_protocol_init(struct scmi_handle *handle)
{
int domain;
@@ -214,6 +288,12 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
scmi_power_domain_attributes_get(handle, domain, dom);
}
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_POWER, SCMI_PROTO_QUEUE_SZ,
+ &power_event_ops, power_events,
+ ARRAY_SIZE(power_events),
+ pinfo->num_domains);
+
pinfo->version = version;
handle->power_ops = &power_ops;
handle->power_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index de73054554f3..3691bafca057 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -5,7 +5,12 @@
* Copyright (C) 2019 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
@@ -13,10 +18,6 @@ enum scmi_reset_protocol_cmd {
RESET_NOTIFY = 0x5,
};
-enum scmi_reset_protocol_notify {
- RESET_ISSUED = 0x0,
-};
-
#define NUM_RESET_DOMAIN_MASK 0xffff
#define RESET_NOTIFY_ENABLE BIT(0)
@@ -40,6 +41,18 @@ struct scmi_msg_reset_domain_reset {
#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
};
+struct scmi_msg_reset_notify {
+ __le32 id;
+ __le32 event_control;
+#define RESET_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_reset_issued_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 reset_state;
+};
+
struct reset_dom_info {
bool async_reset;
bool reset_notify;
@@ -190,6 +203,75 @@ static struct scmi_reset_ops reset_ops = {
.deassert = scmi_reset_domain_deassert,
};
+static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
+ bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_notify *cfg;
+
+ ret = scmi_xfer_get_init(handle, RESET_NOTIFY,
+ SCMI_PROTOCOL_RESET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(domain_id);
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_reset_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_reset_issued_notify_payld *p = payld;
+ struct scmi_reset_issued_report *r = report;
+
+ if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->reset_state = le32_to_cpu(p->reset_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static const struct scmi_event reset_events[] = {
+ {
+ .id = SCMI_EVENT_RESET_ISSUED,
+ .max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld),
+ .max_report_sz = sizeof(struct scmi_reset_issued_report),
+ },
+};
+
+static const struct scmi_event_ops reset_event_ops = {
+ .set_notify_enabled = scmi_reset_set_notify_enabled,
+ .fill_custom_report = scmi_reset_fill_custom_report,
+};
+
static int scmi_reset_protocol_init(struct scmi_handle *handle)
{
int domain;
@@ -218,6 +300,12 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
scmi_reset_domain_attributes_get(handle, domain, dom);
}
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_RESET, SCMI_PROTO_QUEUE_SZ,
+ &reset_event_ops, reset_events,
+ ARRAY_SIZE(reset_events),
+ pinfo->num_domains);
+
pinfo->version = version;
handle->reset_ops = &reset_ops;
handle->reset_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index bafbfe358f97..9e44479f0284 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
- domains[i] = &scmi_pd->genpd;
+ if (handle->power_ops->state_get(handle, i, &state)) {
+ dev_warn(dev, "failed to get state for domain %d\n", i);
+ continue;
+ }
scmi_pd->domain = i;
scmi_pd->handle = handle;
@@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
- if (handle->power_ops->state_get(handle, i, &state)) {
- dev_warn(dev, "failed to get state for domain %d\n", i);
- continue;
- }
-
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
+
+ domains[i] = &scmi_pd->genpd;
}
scmi_pd_data->domains = domains;
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index db1b1ab303da..1af0ad362e82 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -5,7 +5,12 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
@@ -14,10 +19,6 @@ enum scmi_sensor_protocol_cmd {
SENSOR_READING_GET = 0x6,
};
-enum scmi_sensor_protocol_notify {
- SENSOR_TRIP_POINT_EVENT = 0x0,
-};
-
struct scmi_msg_resp_sensor_attributes {
__le16 num_sensors;
u8 max_requests;
@@ -71,6 +72,12 @@ struct scmi_msg_sensor_reading_get {
#define SENSOR_READ_ASYNC BIT(0)
};
+struct scmi_sensor_trip_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ __le32 trip_point_desc;
+};
+
struct sensors_info {
u32 version;
int num_sensors;
@@ -271,11 +278,57 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle)
static struct scmi_sensor_ops sensor_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
- .trip_point_notify = scmi_sensor_trip_point_notify,
.trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
};
+static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_sensor_trip_notify_payld *p = payld;
+ struct scmi_sensor_trip_point_report *r = report;
+
+ if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT ||
+ sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+ *src_id = r->sensor_id;
+
+ return r;
+}
+
+static const struct scmi_event sensor_events[] = {
+ {
+ .id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
+ .max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
+ .max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
+ },
+};
+
+static const struct scmi_event_ops sensor_event_ops = {
+ .set_notify_enabled = scmi_sensor_set_notify_enabled,
+ .fill_custom_report = scmi_sensor_fill_custom_report,
+};
+
static int scmi_sensors_protocol_init(struct scmi_handle *handle)
{
u32 version;
@@ -299,6 +352,12 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
scmi_sensor_description_get(handle, sinfo);
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
+ &sensor_event_ops, sensor_events,
+ ARRAY_SIZE(sensor_events),
+ sinfo->num_sensors);
+
sinfo->version = version;
handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
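The sensor producer side is analogous to the power and reset ones; what changes is the report type delivered to the notifier. A minimal sketch of the receiving callback, using the scmi_sensor_trip_point_report fields filled by scmi_sensor_fill_custom_report() above (trip points themselves are still armed via sensor_ops->trip_point_config):

/* Sketch only: callback consuming the report built above. */
static int sensor_trip_cb(struct notifier_block *nb,
			  unsigned long event, void *data)
{
	struct scmi_sensor_trip_point_report *r = data;

	/* trip_point_desc encodes the trip point id and direction */
	pr_info("sensor %u trip event, desc 0x%x (agent %u)\n",
		r->sensor_id, r->trip_point_desc, r->agent_id);
	return NOTIFY_OK;
}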
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 49bc4b0e8428..a1537d123e38 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -21,6 +21,7 @@
*
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
* @func_id: smc/hvc call function id
*/
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 08bc9ddfbdfb..b76acbade2a0 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
-obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o
+obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
index db655e87cdc8..d9dcc20945c6 100644
--- a/drivers/firmware/imx/imx-scu-irq.c
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -10,6 +10,7 @@
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/mailbox_client.h>
+#include <linux/suspend.h>
#define IMX_SC_IRQ_FUNC_ENABLE 1
#define IMX_SC_IRQ_FUNC_STATUS 2
@@ -91,6 +92,7 @@ static void imx_scu_irq_work_handler(struct work_struct *work)
if (!irq_status)
continue;
+ pm_system_wakeup();
imx_scu_irq_notifier_call_chain(irq_status, &i);
}
}
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/firmware/imx/imx-scu-soc.c
index 20d37eaeb5f2..2f32353de2c9 100644
--- a/drivers/soc/imx/soc-imx-scu.c
+++ b/drivers/firmware/imx/imx-scu-soc.c
@@ -10,9 +10,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#define IMX_SCU_SOC_DRIVER_NAME "imx-scu-soc"
-
-static struct imx_sc_ipc *soc_ipc_handle;
+static struct imx_sc_ipc *imx_sc_soc_ipc_handle;
struct imx_sc_msg_misc_get_soc_id {
struct imx_sc_rpc_msg hdr;
@@ -44,7 +42,7 @@ static int imx_scu_soc_uid(u64 *soc_uid)
hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
hdr->size = 1;
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
return ret;
@@ -71,7 +69,7 @@ static int imx_scu_soc_id(void)
msg.data.req.control = IMX_SC_C_ID;
msg.data.req.resource = IMX_SC_R_SYSTEM;
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
return ret;
@@ -80,7 +78,7 @@ static int imx_scu_soc_id(void)
return msg.data.resp.id;
}
-static int imx_scu_soc_probe(struct platform_device *pdev)
+int imx_scu_soc_init(struct device *dev)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
@@ -88,11 +86,11 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
u64 uid = 0;
u32 val;
- ret = imx_scu_get_handle(&soc_ipc_handle);
+ ret = imx_scu_get_handle(&imx_sc_soc_ipc_handle);
if (ret)
return ret;
- soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
+ soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr),
GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -115,73 +113,26 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
/* format soc_id value passed from SCU firmware */
val = id & 0x1f;
- soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x", val);
+ soc_dev_attr->soc_id = devm_kasprintf(dev, GFP_KERNEL, "0x%x", val);
if (!soc_dev_attr->soc_id)
return -ENOMEM;
/* format revision value passed from SCU firmware */
val = (id >> 5) & 0xf;
val = (((val >> 2) + 1) << 4) | (val & 0x3);
- soc_dev_attr->revision = kasprintf(GFP_KERNEL,
- "%d.%d",
- (val >> 4) & 0xf,
- val & 0xf);
- if (!soc_dev_attr->revision) {
- ret = -ENOMEM;
- goto free_soc_id;
- }
+ soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+ (val >> 4) & 0xf, val & 0xf);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
- soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", uid);
- if (!soc_dev_attr->serial_number) {
- ret = -ENOMEM;
- goto free_revision;
- }
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL,
+ "%016llX", uid);
+ if (!soc_dev_attr->serial_number)
+ return -ENOMEM;
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- ret = PTR_ERR(soc_dev);
- goto free_serial_number;
- }
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
return 0;
-
-free_serial_number:
- kfree(soc_dev_attr->serial_number);
-free_revision:
- kfree(soc_dev_attr->revision);
-free_soc_id:
- kfree(soc_dev_attr->soc_id);
- return ret;
-}
-
-static struct platform_driver imx_scu_soc_driver = {
- .driver = {
- .name = IMX_SCU_SOC_DRIVER_NAME,
- },
- .probe = imx_scu_soc_probe,
-};
-
-static int __init imx_scu_soc_init(void)
-{
- struct platform_device *pdev;
- struct device_node *np;
- int ret;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu");
- if (!np)
- return -ENODEV;
-
- of_node_put(np);
-
- ret = platform_driver_register(&imx_scu_soc_driver);
- if (ret)
- return ret;
-
- pdev = platform_device_register_simple(IMX_SCU_SOC_DRIVER_NAME,
- -1, NULL, 0);
- if (IS_ERR(pdev))
- platform_driver_unregister(&imx_scu_soc_driver);
-
- return PTR_ERR_OR_ZERO(pdev);
}
-device_initcall(imx_scu_soc_init);
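The revision arithmetic above is compact; as a worked illustration (the packing is inferred purely from that arithmetic), the 4-bit field from the SCU holds ((major - 1) << 2) | minor:

#include <stdio.h>

int main(void)
{
	unsigned int field = 0x5;		/* example raw 4-bit value, 0b0101 */
	unsigned int major = (field >> 2) + 1;	/* 0b01 + 1 = 2 */
	unsigned int minor = field & 0x3;	/* 0b01     = 1 */

	printf("%u.%u\n", major, minor);	/* prints "2.1" */
	return 0;
}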
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 2ab048222fe9..dca79caccd01 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -328,6 +328,10 @@ static int imx_scu_probe(struct platform_device *pdev)
imx_sc_ipc_handle = sc_ipc;
+ ret = imx_scu_soc_init(dev);
+ if (ret)
+ dev_warn(dev, "failed to initialize SoC info: %d\n", ret);
+
ret = imx_scu_enable_general_irq_channel(dev);
if (ret)
dev_warn(dev,
diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c
new file mode 100644
index 000000000000..a12db6ff323b
--- /dev/null
+++ b/drivers/firmware/imx/rm.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 NXP
+ *
+ * File containing client-side RPC functions for the RM service. These
+ * functions are ported to clients that communicate with the SC.
+ */
+
+#include <linux/firmware/imx/svc/rm.h>
+
+struct imx_sc_msg_rm_rsrc_owned {
+ struct imx_sc_rpc_msg hdr;
+ u16 resource;
+} __packed __aligned(4);
+
+/*
+ * This function checks whether @resource is owned by the current partition.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ *
+ * @return Returns 0 for not owned and 1 for owned.
+ */
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ struct imx_sc_msg_rm_rsrc_owned msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_RM;
+ hdr->func = IMX_SC_RM_FUNC_IS_RESOURCE_OWNED;
+ hdr->size = 2;
+
+ msg.resource = resource;
+
+ /*
+ * The SCU firmware only returns 0 (not owned) or 1 (owned) for the
+ * resource-owned check, so the call always succeeds and the return
+ * value of the RPC does not need checking.
+ */
+ imx_scu_call_rpc(ipc, &msg, true);
+
+ return hdr->func;
+}
+EXPORT_SYMBOL(imx_sc_rm_is_resource_owned);
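A hedged sketch of how a client driver might consult the new helper before touching a resource that could be assigned to another partition (for example one owned by a Cortex-M core); the resource constant is only an illustrative choice:

#include <linux/firmware/imx/sci.h>

static int example_claim_resource(void)
{
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	/* IMX_SC_R_M4_0_PID0 is just an illustrative resource id */
	if (!imx_sc_rm_is_resource_owned(ipc, IMX_SC_R_M4_0_PID0))
		return -ENODEV;	/* owned by another partition, skip it */

	return 0;
}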
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index fb5523aa16ee..af3d6d9ead28 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -167,8 +167,18 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
/* CM40 SS */
- { "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 },
- { "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 },
+ { "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
+ { "cm40-intmux", IMX_SC_R_M4_0_INTMUX, 1, false, 0 },
+ { "cm40-pid", IMX_SC_R_M4_0_PID0, 5, true, 0},
+ { "cm40-mu-a1", IMX_SC_R_M4_0_MU_1A, 1, false, 0},
+ { "cm40-lpuart", IMX_SC_R_M4_0_UART, 1, false, 0},
+
+ /* CM41 SS */
+ { "cm41-i2c", IMX_SC_R_M4_1_I2C, 1, false, 0 },
+ { "cm41-intmux", IMX_SC_R_M4_1_INTMUX, 1, false, 0 },
+ { "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
+ { "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
+ { "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 0e7233a20f34..8393bb3265cc 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -391,7 +391,7 @@ static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
- return qcom_scm_call(__scm->dev, &desc, NULL);
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
static void qcom_scm_set_download_mode(bool enable)
@@ -650,7 +650,7 @@ int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
int ret;
- ret = qcom_scm_call(__scm->dev, &desc, &res);
+ ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
if (ret >= 0)
*val = res.result[0];
@@ -669,8 +669,7 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
.owner = ARM_SMCCC_OWNER_SIP,
};
-
- return qcom_scm_call(__scm->dev, &desc, NULL);
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
@@ -1151,6 +1150,7 @@ static const struct of_device_id qcom_scm_dt_match[] = {
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
},
+ { .compatible = "qcom,scm-msm8994" },
{ .compatible = "qcom,scm-msm8996" },
{ .compatible = "qcom,scm" },
{}
diff --git a/drivers/firmware/smccc/Kconfig b/drivers/firmware/smccc/Kconfig
index 27b675d76235..15e7466179a6 100644
--- a/drivers/firmware/smccc/Kconfig
+++ b/drivers/firmware/smccc/Kconfig
@@ -14,3 +14,12 @@ config HAVE_ARM_SMCCC_DISCOVERY
to add SMCCC discovery mechanism though the PSCI firmware
implementation of PSCI_FEATURES(SMCCC_VERSION) which returns
success on firmware compliant to SMCCC v1.1 and above.
+
+config ARM_SMCCC_SOC_ID
+ bool "SoC bus device for the ARM SMCCC SOC_ID"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ default y
+ select SOC_BUS
+ help
+	  Include support for the SoC bus on ARM SMCCC firmware based
+	  platforms, providing sysfs information about the SoC variant.
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
index 6f369fe3f0b9..72ab84042832 100644
--- a/drivers/firmware/smccc/Makefile
+++ b/drivers/firmware/smccc/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o
+obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
new file mode 100644
index 000000000000..581aa5e9b077
--- /dev/null
+++ b/drivers/firmware/smccc/soc_id.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "SMCCC: SOC_ID: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define SMCCC_SOC_ID_JEP106_BANK_IDX_MASK GENMASK(30, 24)
+/*
+ * As per the SMC Calling Convention specification v1.2 (ARM DEN 0028C)
+ * Section 7.4 SMCCC_ARCH_SOC_ID bits[23:16] are JEP-106 identification
+ * code with parity bit for the SiP. We can drop the parity bit.
+ */
+#define SMCCC_SOC_ID_JEP106_ID_CODE_MASK GENMASK(22, 16)
+#define SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK GENMASK(15, 0)
+
+#define JEP106_BANK_CONT_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_BANK_IDX_MASK, (x)))
+#define JEP106_ID_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_ID_CODE_MASK, (x)))
+#define IMP_DEF_SOC_ID(x) \
+ (u16)(FIELD_GET(SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK, (x)))
+
+static struct soc_device *soc_dev;
+static struct soc_device_attribute *soc_dev_attr;
+
+static int __init smccc_soc_init(void)
+{
+ struct arm_smccc_res res;
+ int soc_id_rev, soc_id_version;
+ static char soc_id_str[20], soc_id_rev_str[12];
+ static char soc_id_jep106_id_str[12];
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return 0;
+
+ if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE) {
+ pr_err("%s: invalid SMCCC conduit\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_SOC_ID, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
+ return 0;
+ }
+
+ if ((int)res.a0 < 0) {
+ pr_info("ARCH_FEATURES(ARCH_SOC_ID) returned error: %lx\n",
+ res.a0);
+ return -EINVAL;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
+ if ((int)res.a0 < 0) {
+ pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
+ return -EINVAL;
+ }
+
+ soc_id_version = res.a0;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
+ if ((int)res.a0 < 0) {
+ pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
+ return -EINVAL;
+ }
+
+ soc_id_rev = res.a0;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ sprintf(soc_id_rev_str, "0x%08x", soc_id_rev);
+ sprintf(soc_id_jep106_id_str, "jep106:%02x%02x",
+ JEP106_BANK_CONT_CODE(soc_id_version),
+ JEP106_ID_CODE(soc_id_version));
+ sprintf(soc_id_str, "%s:%04x", soc_id_jep106_id_str,
+ IMP_DEF_SOC_ID(soc_id_version));
+
+ soc_dev_attr->soc_id = soc_id_str;
+ soc_dev_attr->revision = soc_id_rev_str;
+ soc_dev_attr->family = soc_id_jep106_id_str;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ID = %s Revision = %s\n", soc_dev_attr->soc_id,
+ soc_dev_attr->revision);
+
+ return 0;
+}
+module_init(smccc_soc_init);
+
+static void __exit smccc_soc_exit(void)
+{
+ if (soc_dev)
+ soc_device_unregister(soc_dev);
+ kfree(soc_dev_attr);
+}
+module_exit(smccc_soc_exit);
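As a worked example of the decoding above (the version word is hypothetical; real values depend on the SiP), a SOC_ID version of 0x043b0042 would yield family "jep106:043b" and soc_id "jep106:043b:0042":

#include <stdio.h>

int main(void)
{
	unsigned int ver = 0x043b0042;	/* hypothetical SOC_ID version word */
	unsigned int bank = (ver >> 24) & 0x7f;	/* JEP106 bank index       */
	unsigned int id = (ver >> 16) & 0x7f;	/* ID code, parity dropped */
	unsigned int soc = ver & 0xffff;	/* imp. defined SoC ID     */

	printf("jep106:%02x%02x:%04x\n", bank, id, soc);	/* jep106:043b:0042 */
	return 0;
}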
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 636b40d4364d..c1bbba9ee93a 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -4,11 +4,14 @@
*/
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
+static DEFINE_MUTEX(bpmp_debug_lock);
+
struct seqbuf {
char *buf;
size_t pos;
@@ -96,6 +99,354 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
return filename;
}
+static int mrq_debug_open(struct tegra_bpmp *bpmp, const char *name,
+ uint32_t *fd, uint32_t *len, bool write)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(write ? CMD_DEBUG_OPEN_WO : CMD_DEBUG_OPEN_RO),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ ssize_t sz_name;
+ int err = 0;
+
+ sz_name = strscpy(req.fop.name, name, sizeof(req.fop.name));
+ if (sz_name < 0) {
+ pr_err("File name too large: %s\n", name);
+ return -EINVAL;
+ }
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *len = resp.fop.datalen;
+ *fd = resp.fop.fd;
+
+ return 0;
+}
+
+static int mrq_debug_close(struct tegra_bpmp *bpmp, uint32_t fd)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_CLOSE),
+ .frd = {
+ .fd = fd,
+ },
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err = 0;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
+ char *data, size_t sz_data, uint32_t *nbytes)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_READ),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ int remaining, err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 0);
+ if (err)
+ goto out;
+
+ if (len > sz_data) {
+ err = -EFBIG;
+ goto close;
+ }
+
+ req.frd.fd = fd;
+ remaining = len;
+
+ while (remaining > 0) {
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ if (resp.frd.readlen > remaining) {
+ pr_err("%s: read data length invalid\n", __func__);
+ err = -EINVAL;
+ goto close;
+ }
+
+ memcpy(data, resp.frd.data, resp.frd.readlen);
+ data += resp.frd.readlen;
+ remaining -= resp.frd.readlen;
+ }
+
+ *nbytes = len;
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int mrq_debug_write(struct tegra_bpmp *bpmp, const char *name,
+ uint8_t *data, size_t sz_data)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_WRITE)
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ size_t remaining;
+ int err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 1);
+ if (err)
+ goto out;
+
+ if (sz_data > len) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ req.fwr.fd = fd;
+ remaining = sz_data;
+
+ while (remaining > 0) {
+ len = min(remaining, sizeof(req.fwr.data));
+ memcpy(req.fwr.data, data, len);
+ req.fwr.datalen = len;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ data += req.fwr.datalen;
+ remaining -= req.fwr.datalen;
+ }
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int bpmp_debug_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ uint32_t nbytes = 0;
+ size_t len;
+ int err;
+
+ len = seq_get_buf(m, &databuf);
+ if (!databuf)
+ return -ENOMEM;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes);
+ if (!err)
+ seq_commit(m, nbytes);
+
+ return err;
+}
+
+static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ ssize_t err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ databuf = kmalloc(count, GFP_KERNEL);
+ if (!databuf)
+ return -ENOMEM;
+
+ if (copy_from_user(databuf, buf, count)) {
+ err = -EFAULT;
+ goto free_ret;
+ }
+
+ err = mrq_debug_write(bpmp, filename, databuf, count);
+
+free_ret:
+ kfree(databuf);
+
+ return err ?: count;
+}
+
+static int bpmp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, bpmp_debug_show, file, SZ_256K);
+}
+
+static const struct file_operations bpmp_debug_fops = {
+ .open = bpmp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = bpmp_debug_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
+ struct dentry *parent,
+ char *ppath)
+{
+ const size_t pathlen = SZ_256;
+ const size_t bufsize = SZ_16K;
+ uint32_t dsize, attrs = 0;
+ struct dentry *dentry;
+ struct seqbuf seqbuf;
+ char *buf, *pathbuf;
+ const char *name;
+ int err = 0;
+
+ if (!bpmp || !parent || !ppath)
+ return -EINVAL;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pathbuf = kzalloc(pathlen, GFP_KERNEL);
+ if (!pathbuf) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ err = mrq_debug_read(bpmp, ppath, buf, bufsize, &dsize);
+ if (err)
+ goto out;
+
+ seqbuf_init(&seqbuf, buf, dsize);
+
+ while (!seqbuf_eof(&seqbuf)) {
+ err = seqbuf_read_u32(&seqbuf, &attrs);
+ if (err)
+ goto out;
+
+ err = seqbuf_read_str(&seqbuf, &name);
+ if (err < 0)
+ goto out;
+
+ if (attrs & DEBUGFS_S_ISDIR) {
+ size_t len;
+
+ dentry = debugfs_create_dir(name, parent);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ len = strlen(ppath) + strlen(name) + 1;
+ if (len >= pathlen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ strncpy(pathbuf, ppath, pathlen);
+ strncat(pathbuf, name, strlen(name));
+ strcat(pathbuf, "/");
+
+ err = bpmp_populate_debugfs_inband(bpmp, dentry,
+ pathbuf);
+ if (err < 0)
+ goto out;
+ } else {
+ umode_t mode;
+
+ mode = attrs & DEBUGFS_S_IRUSR ? 0400 : 0;
+ mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
+ dentry = debugfs_create_file(name, mode, parent, bpmp,
+ &bpmp_debug_fops);
+ if (!dentry) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(pathbuf);
+ kfree(buf);
+
+ return err;
+}
+
static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
dma_addr_t name, size_t sz_name,
dma_addr_t data, size_t sz_data,
@@ -127,6 +478,8 @@ static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
*nbytes = (size_t)resp.fop.nbytes;
@@ -184,6 +537,8 @@ static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
*nbytes = (size_t)resp.dumpdir.nbytes;
@@ -202,7 +557,7 @@ static int debugfs_show(struct seq_file *m, void *p)
char buf[256];
const char *filename;
size_t len, nbytes;
- int ret;
+ int err;
filename = get_filename(bpmp, file, buf, sizeof(buf));
if (!filename)
@@ -216,24 +571,24 @@ static int debugfs_show(struct seq_file *m, void *p)
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto free_namebuf;
}
len = strlen(filename);
strncpy(namevirt, filename, namesize);
- ret = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+ err = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
&nbytes);
- if (!ret)
+ if (!err)
seq_write(m, datavirt, nbytes);
dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
- return ret;
+ return err;
}
static int debugfs_open(struct inode *inode, struct file *file)
@@ -253,7 +608,7 @@ static ssize_t debugfs_store(struct file *file, const char __user *buf,
char fnamebuf[256];
const char *filename;
size_t len;
- int ret;
+ int err;
filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
if (!filename)
@@ -267,7 +622,7 @@ static ssize_t debugfs_store(struct file *file, const char __user *buf,
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto free_namebuf;
}
@@ -275,11 +630,11 @@ static ssize_t debugfs_store(struct file *file, const char __user *buf,
strncpy(namevirt, filename, namesize);
if (copy_from_user(datavirt, buf, count)) {
- ret = -EFAULT;
+ err = -EFAULT;
goto free_databuf;
}
- ret = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+ err = mrq_debugfs_write(bpmp, namephys, len, dataphys,
count);
free_databuf:
@@ -287,7 +642,7 @@ free_databuf:
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
- return ret ?: count;
+ return err ?: count;
}
static const struct file_operations debugfs_fops = {
@@ -350,59 +705,66 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
return 0;
}
-static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf,
- size_t bufsize, struct dentry *root)
+static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp)
{
struct seqbuf seqbuf;
+ const size_t sz = SZ_512K;
+ dma_addr_t phys;
+ size_t nbytes;
+ void *virt;
int err;
- bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
- if (!bpmp->debugfs_mirror)
+ virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
return -ENOMEM;
- seqbuf_init(&seqbuf, buf, bufsize);
- err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+ err = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
if (err < 0) {
- debugfs_remove_recursive(bpmp->debugfs_mirror);
- bpmp->debugfs_mirror = NULL;
+ goto free;
+ } else if (nbytes > sz) {
+ err = -EINVAL;
+ goto free;
}
+ seqbuf_init(&seqbuf, virt, nbytes);
+ err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+free:
+ dma_free_coherent(bpmp->dev, sz, virt, phys);
+
return err;
}
int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
{
- dma_addr_t phys;
- void *virt;
- const size_t sz = SZ_256K;
- size_t nbytes;
- int ret;
struct dentry *root;
+ bool inband;
+ int err;
- if (!tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
+ inband = tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUG);
+
+ if (!inband && !tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
return 0;
root = debugfs_create_dir("bpmp", NULL);
if (!root)
return -ENOMEM;
- virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
- GFP_KERNEL | GFP_DMA32);
- if (!virt) {
- ret = -ENOMEM;
+ bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+ if (!bpmp->debugfs_mirror) {
+ err = -ENOMEM;
goto out;
}
- ret = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
- if (ret < 0)
- goto free;
+ if (inband)
+ err = bpmp_populate_debugfs_inband(bpmp, bpmp->debugfs_mirror,
+ "/");
+ else
+ err = bpmp_populate_debugfs_shmem(bpmp);
- ret = create_debugfs_mirror(bpmp, virt, nbytes, root);
-free:
- dma_free_coherent(bpmp->dev, sz, virt, phys);
out:
- if (ret < 0)
- debugfs_remove(root);
+ if (err < 0)
+ debugfs_remove_recursive(root);
- return ret;
+ return err;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index fe6702df24bf..4d93d8925e14 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -515,10 +515,10 @@ bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
.size = sizeof(resp),
},
};
- int ret;
+ int err;
- ret = tegra_bpmp_transfer(bpmp, &msg);
- if (ret || msg.rx.ret)
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err || msg.rx.ret)
return false;
return resp.status == 0;
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 4126be9e3216..53cee17d0115 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index f0d068c03944..57cd04062994 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -6,7 +6,7 @@
* The system works in a message response protocol
* See: http://processors.wiki.ti.com/index.php/TISCI for details
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __TI_SCI_H
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index e27f68437b56..50bb2a6d6ccf 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -7,6 +7,7 @@
#include <linux/armada-37xx-rwtm-mailbox.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/hw_random.h>
#include <linux/mailbox_client.h>
@@ -69,6 +70,18 @@ struct mox_rwtm {
/* public key burned in eFuse */
int has_pubkey;
u8 pubkey[135];
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * Signature process. This is currently done via debugfs, because it
+ * does not conform to the sysfs standard "one file per attribute".
+	 * It should be rewritten via the crypto API once the akcipher API
+	 * is available from userspace.
+ */
+ struct dentry *debugfs_root;
+ u32 last_sig[34];
+ int last_sig_done;
+#endif
};
struct mox_kobject {
@@ -279,6 +292,152 @@ unlock_mutex:
return ret;
}
+#ifdef CONFIG_DEBUG_FS
+static int rwtm_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ ssize_t ret;
+
+ /* only allow one read, of 136 bytes, from position 0 */
+ if (*ppos != 0)
+ return 0;
+
+ if (len < 136)
+ return -EINVAL;
+
+ if (!rwtm->last_sig_done)
+ return -ENODATA;
+
+ /* 2 arrays of 17 32-bit words are 136 bytes */
+ ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136);
+ rwtm->last_sig_done = 0;
+
+ return ret;
+}
+
+static ssize_t do_sign_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct armada_37xx_rwtm_tx_msg msg;
+ loff_t dummy = 0;
+ ssize_t ret;
+
+ /* the input is a SHA-512 hash, so exactly 64 bytes have to be read */
+ if (len != 64)
+ return -EINVAL;
+
+	/* if the previous signature has not been read yet, refuse a new one */
+ if (rwtm->last_sig_done)
+ return -EBUSY;
+
+ if (!mutex_trylock(&rwtm->busy))
+ return -EBUSY;
+
+ /*
+ * Here we have to send:
+ * 1. Address of the input to sign.
+ * The input is an array of 17 32-bit words, the first (most
+	 * significant) is 0; the remaining 16 words are copied from the SHA-512
+ * hash given by the user and converted from BE to LE.
+ * 2. Address of the buffer where ECDSA signature value R shall be
+ * stored by the rWTM firmware.
+ * 3. Address of the buffer where ECDSA signature value S shall be
+ * stored by the rWTM firmware.
+ */
+ memset(rwtm->buf, 0, 4);
+ ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len);
+ if (ret < 0)
+ goto unlock_mutex;
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, 17);
+
+ msg.command = MBOX_CMD_SIGN;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = rwtm->buf_phys + 68;
+ msg.args[3] = rwtm->buf_phys + 2 * 68;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = MBOX_STS_VALUE(reply->retval);
+ if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS)
+ goto unlock_mutex;
+
+ /*
+ * Here we read the R and S values of the ECDSA signature
+ * computed by the rWTM firmware and convert their words from
+ * LE to BE.
+ */
+ memcpy(rwtm->last_sig, rwtm->buf + 68, 136);
+ cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34);
+ rwtm->last_sig_done = 1;
+
+ mutex_unlock(&rwtm->busy);
+ return len;
+unlock_mutex:
+ mutex_unlock(&rwtm->busy);
+ return ret;
+}
+
+static const struct file_operations do_sign_fops = {
+ .owner = THIS_MODULE,
+ .open = rwtm_debug_open,
+ .read = do_sign_read,
+ .write = do_sign_write,
+ .llseek = no_llseek,
+};
+
+static int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("turris-mox-rwtm", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm,
+ &do_sign_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ rwtm->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+ debugfs_remove_recursive(rwtm->debugfs_root);
+}
+#else
+static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ return 0;
+}
+
+static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+}
+#endif
+
static int turris_mox_rwtm_probe(struct platform_device *pdev)
{
struct mox_rwtm *rwtm;
@@ -340,6 +499,12 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
goto free_channel;
}
+ ret = rwtm_register_debugfs(rwtm);
+ if (ret < 0) {
+ dev_err(dev, "Failed creating debugfs entries: %i\n", ret);
+ goto free_channel;
+ }
+
return 0;
free_channel:
@@ -355,6 +520,7 @@ static int turris_mox_rwtm_remove(struct platform_device *pdev)
{
struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
+ rwtm_unregister_debugfs(rwtm);
sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
kobject_put(rwtm_to_kobj(rwtm));
mbox_free_channel(rwtm->mbox);
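From userspace the new interface is strict: exactly 64 bytes (a SHA-512 digest) go in, exactly 136 bytes (the R and S values, 17 big-endian words each) come out, and only one read at offset 0 is allowed per signature. A minimal sketch, assuming debugfs is mounted at the usual place:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DO_SIGN "/sys/kernel/debug/turris-mox-rwtm/do_sign"

int main(void)
{
	unsigned char hash[64] = { 0 };	/* fill with a real SHA-512 digest */
	unsigned char sig[136];
	int fd;

	fd = open(DO_SIGN, O_WRONLY);
	if (fd < 0 || write(fd, hash, sizeof(hash)) != sizeof(hash))
		return 1;
	close(fd);

	fd = open(DO_SIGN, O_RDONLY);	/* reopen so the read starts at offset 0 */
	if (fd < 0 || read(fd, sig, sizeof(sig)) != sizeof(sig))
		return 1;
	close(fd);

	printf("got %zu byte ECDSA signature\n", sizeof(sig));
	return 0;
}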
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 7cd8f415fd02..e6e134ae9c32 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -487,6 +487,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
mtk_crtc_ddp_config(crtc, cmdq_handle);
+ cmdq_pkt_finalize(cmdq_handle);
cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
}
#endif
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7f130829bf01..dead5db3315a 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -559,6 +559,22 @@ static int geni_i2c_probe(struct platform_device *pdev)
gi2c->adap.dev.of_node = dev->of_node;
strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+ ret = geni_icc_get(&gi2c->se, "qup-memory");
+ if (ret)
+ return ret;
+ /*
+ * Set the bus quota for core and cpu to a reasonable value for
+ * register access.
+ * Set quota for DDR based on bus speed.
+ */
+ gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
+ gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+ gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out);
+
+ ret = geni_icc_set_bw(&gi2c->se);
+ if (ret)
+ return ret;
+
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
dev_err(dev, "Error turning on resources %d\n", ret);
@@ -581,6 +597,10 @@ static int geni_i2c_probe(struct platform_device *pdev)
return ret;
}
+ ret = geni_icc_disable(&gi2c->se);
+ if (ret)
+ return ret;
+
dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
gi2c->suspended = 1;
@@ -625,7 +645,7 @@ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
gi2c->suspended = 1;
}
- return 0;
+ return geni_icc_disable(&gi2c->se);
}
static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
@@ -633,6 +653,10 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
int ret;
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+ ret = geni_icc_enable(&gi2c->se);
+ if (ret)
+ return ret;
+
ret = geni_se_resources_on(&gi2c->se);
if (ret)
return ret;
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index 2a11a63e7217..a3d2ef1d9903 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -266,11 +266,7 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
if (!commit_idx[0])
goto out;
- ret = rpmh_invalidate(voter->dev);
- if (ret) {
- pr_err("Error invalidating RPMH client (%d)\n", ret);
- goto out;
- }
+ rpmh_invalidate(voter->dev);
ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
cmds, commit_idx);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 7e3ebf6ed2cd..85de19fe9b6e 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 Interrupt Aggregator irqchip driver
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index 59d51a20bbd8..5ea148faf2ab 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 Interrupt Router irqchip driver
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 04368ee2a809..d46f21db9b1f 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -5,6 +5,12 @@
menuconfig MEMORY
bool "Memory Controller drivers"
+ help
+	  This option enables specific memory controller drivers, mostly
+	  useful on embedded systems. These can be controllers for DRAM
+	  (SDR, DDR), ROM, SRAM and others. The drivers' features range
+	  from memory tuning and frequency scaling to enabling access to
+	  attached peripherals through the memory bus.
if MEMORY
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 82b415be18d1..60e8633b1175 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -23,7 +23,7 @@
* - BE kernel + LE firmware image
* - BE kernel + BE firmware image
*
- * The DPCU always runs in big endian mode. The firwmare image, however, can
+ * The DPCU always runs in big endian mode. The firmware image, however, can
* be in either format. Also, communication between host CPU and DCPU is
* always in little endian.
*/
@@ -188,7 +188,7 @@ struct brcmstb_dpfe_priv {
struct mutex lock;
};
-static const char *error_text[] = {
+static const char * const error_text[] = {
"Success", "Header code incorrect", "Unknown command or argument",
"Incorrect checksum", "Malformed command", "Timed out",
};
@@ -379,9 +379,8 @@ static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
void __iomem *ptr = NULL;
/* There is no need to use this function for API v3 or later. */
- if (unlikely(priv->dpfe_api->version >= 3)) {
+ if (unlikely(priv->dpfe_api->version >= 3))
return NULL;
- }
msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
index 633fea6a4edf..85965fa26e0b 100644
--- a/drivers/memory/bt1-l2-ctl.c
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -66,6 +66,7 @@ struct l2_ctl_device_attribute {
struct device_attribute dev_attr;
enum l2_ctl_stall id;
};
+
#define to_l2_ctl_dev_attr(_dev_attr) \
container_of(_dev_attr, struct l2_ctl_device_attribute, dev_attr)
@@ -242,6 +243,7 @@ static ssize_t l2_ctl_latency_store(struct device *dev,
return count;
}
+
static L2_CTL_ATTR_RW(l2_ws_latency, l2_ctl_latency, L2_WS_STALL);
static L2_CTL_ATTR_RW(l2_tag_latency, l2_ctl_latency, L2_TAG_STALL);
static L2_CTL_ATTR_RW(l2_data_latency, l2_ctl_latency, L2_DATA_STALL);
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
index e8f9b3f461f5..872addd0ec60 100644
--- a/drivers/memory/da8xx-ddrctl.c
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -102,14 +102,12 @@ static int da8xx_ddrctl_probe(struct platform_device *pdev)
{
const struct da8xx_ddrctl_config_knob *knob;
const struct da8xx_ddrctl_setting *setting;
- struct device_node *node;
struct resource *res;
void __iomem *ddrctl;
struct device *dev;
u32 reg;
dev = &pdev->dev;
- node = dev->of_node;
setting = da8xx_ddrctl_get_board_settings();
if (!setting) {
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index db8043019ec6..4b98d1854cd7 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TI AM33XX EMIF PM Assembly Offsets
*
* Copyright (C) 2016-2017 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/ti-emif-sram.h>
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 9d9127bf2a59..bb6a71d26798 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -282,10 +282,9 @@ static void set_lpmode(struct emif_data *emif, u8 lpmode)
* the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
*/
if ((emif->plat_data->ip_rev == EMIF_4D) &&
- (EMIF_LP_MODE_PWR_DN == lpmode)) {
+ (lpmode == EMIF_LP_MODE_PWR_DN)) {
WARN_ONCE(1,
- "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by"
- "erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
+ "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
/* rollback LP_MODE to Self-refresh mode */
lpmode = EMIF_LP_MODE_SELF_REFRESH;
}
@@ -714,7 +713,7 @@ static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
fifo_we_slave_ratio << 22;
@@ -725,7 +724,7 @@ static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
@@ -736,7 +735,7 @@ static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
fifo_we_slave_ratio << 13;
@@ -975,8 +974,7 @@ static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
dev_err(emif->dev,
- "%s:NOT Extended temperature capable memory."
- "Converting MR4=0x%02x as shutdown event\n",
+ "%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
__func__, emif->temperature_level);
/*
* Temperature far too high - do kernel_power_off()
@@ -1318,9 +1316,9 @@ static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
dev_info->cal_resistors_per_cs = true;
- if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s4"))
+ if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
dev_info->type = DDR_TYPE_LPDDR2_S4;
- else if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s2"))
+ else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
dev_info->type = DDR_TYPE_LPDDR2_S2;
of_property_read_u32(np_ddr, "density", &density);
@@ -1563,11 +1561,8 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
goto error;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
- __func__, irq);
+ if (irq < 0)
goto error;
- }
emif_onetime_settings(emif);
emif_debugfs_init(emif);
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index a2c971743ffe..89f99b5b6450 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -53,6 +53,7 @@ int fsl_ifc_find(phys_addr_t addr_base)
for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
+
if (cspr & CSPR_V && (cspr & CSPR_BA) ==
convert_ifc_address(addr_base))
return i;
@@ -153,8 +154,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
/* read for chip select error */
cs_err = ifc_in32(&ifc->cm_evter_stat);
if (cs_err) {
- dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
- "any memory bank 0x%08X\n", cs_err);
+ dev_err(ctrl->dev, "transaction sent to IFC is not mapped to any memory bank 0x%08X\n",
+ cs_err);
/* clear the chip select error */
ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
@@ -163,24 +164,24 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
err_addr = ifc_in32(&ifc->cm_erattr1);
if (status & IFC_CM_ERATTR0_ERTYP_READ)
- dev_err(ctrl->dev, "Read transaction error"
- "CM_ERATTR0 0x%08X\n", status);
+ dev_err(ctrl->dev, "Read transaction error CM_ERATTR0 0x%08X\n",
+ status);
else
- dev_err(ctrl->dev, "Write transaction error"
- "CM_ERATTR0 0x%08X\n", status);
+ dev_err(ctrl->dev, "Write transaction error CM_ERATTR0 0x%08X\n",
+ status);
err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
IFC_CM_ERATTR0_ERAID_SHIFT;
- dev_err(ctrl->dev, "AXI ID of the error"
- "transaction 0x%08X\n", err_axiid);
+ dev_err(ctrl->dev, "AXI ID of the error transaction 0x%08X\n",
+ err_axiid);
err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
IFC_CM_ERATTR0_ESRCID_SHIFT;
- dev_err(ctrl->dev, "SRC ID of the error"
- "transaction 0x%08X\n", err_srcid);
+ dev_err(ctrl->dev, "SRC ID of the error transaction 0x%08X\n",
+ err_srcid);
- dev_err(ctrl->dev, "Transaction Address corresponding to error"
- "ERADDR 0x%08X\n", err_addr);
+ dev_err(ctrl->dev, "Transaction Address corresponding to error ERADDR 0x%08X\n",
+ err_addr);
ret = IRQ_HANDLED;
}
@@ -199,7 +200,7 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
* the resources needed for the controller only. The
* resources for the NAND banks themselves are allocated
* in the chip probe function.
-*/
+ */
static int fsl_ifc_ctrl_probe(struct platform_device *dev)
{
int ret = 0;
@@ -250,8 +251,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
/* get the Controller level irq */
fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
if (fsl_ifc_ctrl_dev->irq == 0) {
- dev_err(&dev->dev, "failed to get irq resource "
- "for IFC\n");
+ dev_err(&dev->dev, "failed to get irq resource for IFC\n");
ret = -ENODEV;
goto err;
}
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index b232ed279fc3..3ec5cb0fce1e 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -22,6 +23,8 @@
#define NEMC_SMCRn(n) (0x14 + (((n) - 1) * 4))
#define NEMC_NFCSR 0x50
+#define NEMC_REG_LEN 0x54
+
#define NEMC_SMCR_SMT BIT(0)
#define NEMC_SMCR_BW_SHIFT 6
#define NEMC_SMCR_BW_MASK (0x3 << NEMC_SMCR_BW_SHIFT)
@@ -288,7 +291,19 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
nemc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nemc->base = devm_ioremap_resource(dev, res);
+
+ /*
+ * The driver currently only uses the registers up to offset
+ * NEMC_REG_LEN. Since the EFUSE registers are in the middle of the
+ * NEMC registers, we only request the registers we will use for now;
+ * that way the EFUSE driver can probe too.
+ */
+ if (!devm_request_mem_region(dev, res->start, NEMC_REG_LEN, dev_name(dev))) {
+ dev_err(dev, "unable to request I/O memory region\n");
+ return -EBUSY;
+ }
+
+ nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
if (IS_ERR(nemc->base)) {
dev_err(dev, "failed to get I/O memory\n");
return PTR_ERR(nemc->base);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index a113e811faab..e154bea3cf14 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -60,7 +60,7 @@ struct mtk_smi_common_plat {
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
- void (*config_port)(struct device *);
+ void (*config_port)(struct device *dev);
unsigned int larb_direct_to_common_mask;
bool has_gals;
};
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 886aea587276..8450638e8670 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -124,32 +124,32 @@ static int devbus_get_timing_params(struct devbus *devbus,
* The bus width is encoded into the register as 0 for 8 bits,
* and 1 for 16 bits, so we do the necessary conversion here.
*/
- if (r->bus_width == 8)
+ if (r->bus_width == 8) {
r->bus_width = 0;
- else if (r->bus_width == 16)
+ } else if (r->bus_width == 16) {
r->bus_width = 1;
- else {
+ } else {
dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width);
return -EINVAL;
}
err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
- &r->badr_skew);
+ &r->badr_skew);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
- &r->turn_off);
+ &r->turn_off);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
- &r->acc_first);
+ &r->acc_first);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
- &r->acc_next);
+ &r->acc_next);
if (err < 0)
return err;
@@ -175,17 +175,17 @@ static int devbus_get_timing_params(struct devbus *devbus,
}
err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
- &w->ale_wr);
+ &w->ale_wr);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
- &w->wr_low);
+ &w->wr_low);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
- &w->wr_high);
+ &w->wr_high);
if (err < 0)
return err;
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 71f26eac7350..d9f5437d3bce 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -4,11 +4,10 @@
*
* Copyright (C) 2012 Texas Instruments, Inc.
* Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
*/
#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/list.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <linux/export.h>
@@ -19,7 +18,7 @@
/**
* of_get_min_tck() - extract min timing values for ddr
* @np: pointer to ddr device tree node
- * @device: device requesting for min timing values
+ * @dev: device requesting for min timing values
*
* Populates the lpddr2_min_tck structure by extracting data
* from device tree node. Returns a pointer to the populated
@@ -27,7 +26,7 @@
* default min timings provided by JEDEC.
*/
const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
- struct device *dev)
+ struct device *dev)
{
int ret = 0;
struct lpddr2_min_tck *min;
@@ -56,13 +55,13 @@ const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
return min;
default_min_tck:
- dev_warn(dev, "%s: using default min-tck values\n", __func__);
+ dev_warn(dev, "Using default min-tck values\n");
return &lpddr2_jedec_min_tck;
}
EXPORT_SYMBOL(of_get_min_tck);
static int of_do_get_timings(struct device_node *np,
- struct lpddr2_timings *tim)
+ struct lpddr2_timings *tim)
{
int ret;
@@ -84,7 +83,7 @@ static int of_do_get_timings(struct device_node *np,
ret |= of_property_read_u32(np, "tZQinit", &tim->tZQinit);
ret |= of_property_read_u32(np, "tRAS-max-ns", &tim->tRAS_max_ns);
ret |= of_property_read_u32(np, "tDQSCK-max-derated",
- &tim->tDQSCK_max_derated);
+ &tim->tDQSCK_max_derated);
return ret;
}
@@ -103,7 +102,9 @@ static int of_do_get_timings(struct device_node *np,
* while populating, returns default timings provided by JEDEC.
*/
const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
- struct device *dev, u32 device_type, u32 *nr_frequencies)
+ struct device *dev,
+ u32 device_type,
+ u32 *nr_frequencies)
{
struct lpddr2_timings *timings = NULL;
u32 arr_sz = 0, i = 0;
@@ -116,7 +117,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
tim_compat = "jedec,lpddr2-timings";
break;
default:
- dev_warn(dev, "%s: un-supported memory type\n", __func__);
+ dev_warn(dev, "Unsupported memory type\n");
}
for_each_child_of_node(np_ddr, np_tim)
@@ -145,7 +146,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
return timings;
default_timings:
- dev_warn(dev, "%s: using default timings\n", __func__);
+ dev_warn(dev, "Using default memory timings\n");
*nr_frequencies = ARRAY_SIZE(lpddr2_jedec_timings);
return lpddr2_jedec_timings;
}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL(of_get_ddr_timings);
/**
* of_lpddr3_get_min_tck() - extract min timing values for lpddr3
* @np: pointer to ddr device tree node
- * @device: device requesting for min timing values
+ * @dev: device requesting for min timing values
*
* Populates the lpddr3_min_tck structure by extracting data
* from device tree node. Returns a pointer to the populated
@@ -193,8 +194,7 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
ret |= of_property_read_u32(np, "tMRD-min-tck", &min->tMRD);
if (ret) {
- dev_warn(dev, "%s: errors while parsing min-tck values\n",
- __func__);
+ dev_warn(dev, "Errors while parsing min-tck values\n");
devm_kfree(dev, min);
goto default_min_tck;
}
@@ -202,7 +202,7 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
return min;
default_min_tck:
- dev_warn(dev, "%s: using default min-tck values\n", __func__);
+ dev_warn(dev, "Using default min-tck values\n");
return NULL;
}
EXPORT_SYMBOL(of_lpddr3_get_min_tck);
@@ -264,7 +264,7 @@ const struct lpddr3_timings
tim_compat = "jedec,lpddr3-timings";
break;
default:
- dev_warn(dev, "%s: un-supported memory type\n", __func__);
+ dev_warn(dev, "Unsupported memory type\n");
}
for_each_child_of_node(np_ddr, np_tim)
@@ -293,7 +293,7 @@ const struct lpddr3_timings
return timings;
default_timings:
- dev_warn(dev, "%s: failed to get timings\n", __func__);
+ dev_warn(dev, "Failed to get timings\n");
*nr_frequencies = 0;
return NULL;
}
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index e39ecc4c733d..4a99b232ab0a 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -3,22 +3,23 @@
* OpenFirmware helpers for memory drivers
*
* Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
*/
#ifndef __LINUX_MEMORY_OF_REG_H
#define __LINUX_MEMORY_OF_REG_H
#if defined(CONFIG_OF) && defined(CONFIG_DDR)
-extern const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
- struct device *dev);
-extern const struct lpddr2_timings
- *of_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
- u32 device_type, u32 *nr_frequencies);
-extern const struct lpddr3_min_tck
- *of_lpddr3_get_min_tck(struct device_node *np, struct device *dev);
-extern const struct lpddr3_timings
- *of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
- struct device *dev, u32 device_type, u32 *nr_frequencies);
+const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
+ struct device *dev);
+const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev,
+ u32 device_type, u32 *nr_frequencies);
+const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
+ struct device *dev);
+const struct lpddr3_timings *
+of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies);
#else
static inline const struct lpddr2_min_tck
*of_get_min_tck(struct device_node *np, struct device *dev)
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index eff26c1b1394..f512cbc7a36c 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -29,6 +29,7 @@
#include <linux/of_platform.h>
#include <linux/omap-gpmc.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -108,8 +109,8 @@
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE 2
-#define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
-#define GPMC_REVISION_MINOR(l) (l & 0xf)
+#define GPMC_REVISION_MAJOR(l) (((l) >> 4) & 0xf)
+#define GPMC_REVISION_MINOR(l) ((l) & 0xf)
#define GPMC_HAS_WR_ACCESS 0x1
#define GPMC_HAS_WR_DATA_MUX_BUS 0x2
@@ -140,27 +141,27 @@
#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27)
#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27)
-#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
+#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) (((val) & 3) << 25)
/** CLKACTIVATIONTIME Max Ticks */
#define GPMC_CONFIG1_CLKACTIVATIONTIME_MAX 2
-#define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23)
+#define GPMC_CONFIG1_PAGE_LEN(val) (((val) & 3) << 23)
/** ATTACHEDDEVICEPAGELENGTH Max Value */
#define GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX 2
#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22)
#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21)
-#define GPMC_CONFIG1_WAIT_MON_TIME(val) ((val & 3) << 18)
+#define GPMC_CONFIG1_WAIT_MON_TIME(val) (((val) & 3) << 18)
/** WAITMONITORINGTIME Max Ticks */
#define GPMC_CONFIG1_WAITMONITORINGTIME_MAX 2
-#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16)
-#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12)
+#define GPMC_CONFIG1_WAIT_PIN_SEL(val) (((val) & 3) << 16)
+#define GPMC_CONFIG1_DEVICESIZE(val) (((val) & 3) << 12)
#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
/** DEVICESIZE Max Value */
#define GPMC_CONFIG1_DEVICESIZE_MAX 1
-#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10)
+#define GPMC_CONFIG1_DEVICETYPE(val) (((val) & 3) << 10)
#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
-#define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8)
+#define GPMC_CONFIG1_MUXTYPE(val) (((val) & 3) << 8)
#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4)
-#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3)
+#define GPMC_CONFIG1_FCLK_DIV(val) ((val) & 3)
#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1))
#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2))
#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3))
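The added parentheses above guard against operator-precedence surprises when a macro argument is itself an expression. A minimal standalone illustration (the REG_BAD/REG_GOOD macros are hypothetical, not from this driver; the same reasoning applies to the MPMC_* fixes in pl172.c further down):

	#include <stdio.h>

	/* Before/after shape of the offset macros being fixed. */
	#define REG_BAD(n)	(0x200 + 0x20 * n)	/* argument unparenthesized */
	#define REG_GOOD(n)	(0x200 + 0x20 * (n))	/* argument parenthesized */

	int main(void)
	{
		int cs = 1;

		/* REG_BAD(cs + 1) expands to 0x200 + 0x20 * cs + 1 = 0x221,
		 * but the intended offset is 0x200 + 0x20 * (cs + 1) = 0x240. */
		printf("bad:  0x%x\n", REG_BAD(cs + 1));
		printf("good: 0x%x\n", REG_GOOD(cs + 1));
		return 0;
	}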
@@ -245,7 +246,7 @@ static DEFINE_SPINLOCK(gpmc_mem_lock);
static unsigned int gpmc_cs_num = GPMC_CS_NUM;
static unsigned int gpmc_nr_waitpins;
static resource_size_t phys_base, mem_size;
-static unsigned gpmc_capability;
+static unsigned int gpmc_capability;
static void __iomem *gpmc_base;
static struct clk *gpmc_l3_clk;
@@ -291,15 +292,14 @@ static unsigned long gpmc_get_fclk_period(void)
/**
* gpmc_get_clk_period - get period of selected clock domain in ps
- * @cs Chip Select Region.
- * @cd Clock Domain.
+ * @cs: Chip Select Region.
+ * @cd: Clock Domain.
*
* GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
* prior to calling this function with GPMC_CD_CLK.
*/
static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
{
-
unsigned long tick_ps = gpmc_get_fclk_period();
u32 l;
int div;
@@ -319,7 +319,6 @@ static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
}
return tick_ps;
-
}
static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
@@ -411,7 +410,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
* @reg: GPMC_CS_CONFIGn register offset.
* @st_bit: Start Bit
* @end_bit: End Bit. Must be >= @st_bit.
- * @ma:x Maximum parameter value (before optional @shift).
+ * @max: Maximum parameter value (before optional @shift).
* If 0, maximum is as high as @st_bit and @end_bit allow.
* @name: DTS node name, w/o "gpmc,"
* @cd: Clock Domain of timing parameter.
@@ -511,7 +510,7 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 12, 13, 1,
- GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
+ GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
@@ -625,9 +624,8 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max
l = gpmc_cs_read_reg(cs, reg);
#ifdef CONFIG_OMAP_GPMC_DEBUG
- pr_info(
- "GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
- cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
+ pr_info("GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
+ cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
(l >> st_bit) & mask, time);
#endif
l &= ~(mask << st_bit);
@@ -662,7 +660,6 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max
*/
static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
{
-
int div = gpmc_ns_to_ticks(wait_monitoring);
div += GPMC_CONFIG1_WAITMONITORINGTIME_MAX - 1;
@@ -674,7 +671,6 @@ static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
div = 1;
return div;
-
}
/**
@@ -728,7 +724,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
if (!s->sync_read && !s->sync_write &&
(s->wait_on_read || s->wait_on_write)
) {
-
div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
if (div < 0) {
pr_err("%s: waitmonitoringtime %3d ns too large for greatest gpmcfclkdivider.\n",
@@ -958,7 +953,7 @@ static int gpmc_cs_remap(int cs, u32 base)
* Make sure we ignore any device offsets from the GPMC partition
* allocated for the chip select and that the new base conforms
* to the GPMC 16MB minimum granularity.
- */
+ */
base &= ~(SZ_16M - 1);
gpmc_cs_get_memconf(cs, &old_base, &size);
@@ -1087,7 +1082,7 @@ static struct gpmc_nand_ops nand_ops = {
/**
* gpmc_omap_get_nand_ops - Get the GPMC NAND interface
- * @regs: the GPMC NAND register map exclusive for NAND use.
+ * @reg: the GPMC NAND register map exclusive for NAND use.
* @cs: GPMC chip select number on which the NAND sits. The
* register map returned will be specific to this chip select.
*
@@ -1242,7 +1237,7 @@ int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
}
EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
-int gpmc_get_client_irq(unsigned irq_config)
+int gpmc_get_client_irq(unsigned int irq_config)
{
if (!gpmc_irq_domain) {
pr_warn("%s called before GPMC IRQ domain available\n",
@@ -1465,7 +1460,6 @@ static void gpmc_mem_exit(void)
continue;
gpmc_cs_delete_mem(cs);
}
-
}
static void gpmc_mem_init(void)
@@ -1634,17 +1628,14 @@ static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
/* oe_on */
temp = dev_t->t_oeasu;
if (mux)
- temp = max_t(u32, temp,
- gpmc_t->adv_rd_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_rd_off + dev_t->t_aavdh);
gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
/* access */
temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
- gpmc_t->oe_on + dev_t->t_oe);
- temp = max_t(u32, temp,
- gpmc_t->cs_on + dev_t->t_ce);
- temp = max_t(u32, temp,
- gpmc_t->adv_on + dev_t->t_aa);
+ gpmc_t->oe_on + dev_t->t_oe);
+ temp = max_t(u32, temp, gpmc_t->cs_on + dev_t->t_ce);
+ temp = max_t(u32, temp, gpmc_t->adv_on + dev_t->t_aa);
gpmc_t->access = gpmc_round_ps_to_ticks(temp);
gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
@@ -1753,10 +1744,11 @@ static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
return 0;
}
-/* TODO: remove this function once all peripherals are confirmed to
+/*
+ * TODO: remove this function once all peripherals are confirmed to
* work with generic timing. Simultaneously gpmc_cs_set_timings()
* has to be modified to handle timings in ps instead of ns
-*/
+ */
static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
{
t->cs_on /= 1000;
@@ -2089,7 +2081,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
gpmc_cs_disable_mem(cs);
/*
- * FIXME: gpmc_cs_request() will map the CS to an arbitary
+ * FIXME: gpmc_cs_request() will map the CS to an arbitrary
* location in the gpmc address space. When booting with
* device-tree we want the NOR flash to be mapped to the
* location specified in the device-tree blob. So remap the
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
index ff57195b4e37..575fadbffa30 100644
--- a/drivers/memory/pl172.c
+++ b/drivers/memory/pl172.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Memory controller driver for ARM PrimeCell PL172
* PrimeCell MultiPort Memory Controller (PL172)
@@ -6,10 +7,6 @@
*
* Based on:
* TI AEMIF driver, Copyright (C) 2010 - 2013 Texas Instruments Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/amba/bus.h>
@@ -24,7 +21,7 @@
#include <linux/of_platform.h>
#include <linux/time.h>
-#define MPMC_STATIC_CFG(n) (0x200 + 0x20 * n)
+#define MPMC_STATIC_CFG(n) (0x200 + 0x20 * (n))
#define MPMC_STATIC_CFG_MW_8BIT 0x0
#define MPMC_STATIC_CFG_MW_16BIT 0x1
#define MPMC_STATIC_CFG_MW_32BIT 0x2
@@ -34,17 +31,17 @@
#define MPMC_STATIC_CFG_EW BIT(8)
#define MPMC_STATIC_CFG_B BIT(19)
#define MPMC_STATIC_CFG_P BIT(20)
-#define MPMC_STATIC_WAIT_WEN(n) (0x204 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WEN(n) (0x204 + 0x20 * (n))
#define MPMC_STATIC_WAIT_WEN_MAX 0x0f
-#define MPMC_STATIC_WAIT_OEN(n) (0x208 + 0x20 * n)
+#define MPMC_STATIC_WAIT_OEN(n) (0x208 + 0x20 * (n))
#define MPMC_STATIC_WAIT_OEN_MAX 0x0f
-#define MPMC_STATIC_WAIT_RD(n) (0x20c + 0x20 * n)
+#define MPMC_STATIC_WAIT_RD(n) (0x20c + 0x20 * (n))
#define MPMC_STATIC_WAIT_RD_MAX 0x1f
-#define MPMC_STATIC_WAIT_PAGE(n) (0x210 + 0x20 * n)
+#define MPMC_STATIC_WAIT_PAGE(n) (0x210 + 0x20 * (n))
#define MPMC_STATIC_WAIT_PAGE_MAX 0x1f
-#define MPMC_STATIC_WAIT_WR(n) (0x214 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WR(n) (0x214 + 0x20 * (n))
#define MPMC_STATIC_WAIT_WR_MAX 0x1f
-#define MPMC_STATIC_WAIT_TURN(n) (0x218 + 0x20 * n)
+#define MPMC_STATIC_WAIT_TURN(n) (0x218 + 0x20 * (n))
#define MPMC_STATIC_WAIT_TURN_MAX 0x0f
/* Maximum number of static chip selects */
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 20a8406ce786..8e240f078afc 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -23,5 +23,12 @@ config EXYNOS5422_DMC
config EXYNOS_SROM
bool "Exynos SROM controller driver" if COMPILE_TEST
depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
+ help
+ This adds a driver for the Samsung Exynos SoC SROM controller. In
+ its basic operation mode the driver only saves and restores the
+ SROM registers during suspend. If appropriate device tree
+ configuration is provided, the driver also enables support for
+ external memory or external devices.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
endif
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index 6510d7bab217..e73dd330af47 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -47,9 +47,9 @@ struct exynos_srom {
struct exynos_srom_reg_dump *reg_offset;
};
-static struct exynos_srom_reg_dump *exynos_srom_alloc_reg_dump(
- const unsigned long *rdump,
- unsigned long nr_rdump)
+static struct exynos_srom_reg_dump *
+exynos_srom_alloc_reg_dump(const unsigned long *rdump,
+ unsigned long nr_rdump)
{
struct exynos_srom_reg_dump *rd;
unsigned int i;
@@ -116,7 +116,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
}
srom = devm_kzalloc(&pdev->dev,
- sizeof(struct exynos_srom), GFP_KERNEL);
+ sizeof(struct exynos_srom), GFP_KERNEL);
if (!srom)
return -ENOMEM;
@@ -130,7 +130,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, srom);
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
- ARRAY_SIZE(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
if (!srom->reg_offset) {
iounmap(srom->reg_base);
return -ENOMEM;
@@ -157,16 +157,16 @@ static int exynos_srom_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static void exynos_srom_save(void __iomem *base,
- struct exynos_srom_reg_dump *rd,
- unsigned int num_regs)
+ struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
{
for (; num_regs > 0; --num_regs, ++rd)
rd->value = readl(base + rd->offset);
}
static void exynos_srom_restore(void __iomem *base,
- const struct exynos_srom_reg_dump *rd,
- unsigned int num_regs)
+ const struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
{
for (; num_regs > 0; --num_regs, ++rd)
writel(rd->value, base + rd->offset);
@@ -177,7 +177,7 @@ static int exynos_srom_suspend(struct device *dev)
struct exynos_srom *srom = dev_get_drvdata(dev);
exynos_srom_save(srom->reg_base, srom->reg_offset,
- ARRAY_SIZE(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
return 0;
}
@@ -186,7 +186,7 @@ static int exynos_srom_resume(struct device *dev)
struct exynos_srom *srom = dev_get_drvdata(dev);
exynos_srom_restore(srom->reg_base, srom->reg_offset,
- ARRAY_SIZE(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
return 0;
}
#endif
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 25196d6268e2..4312233e8e0a 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -270,12 +270,14 @@ static int find_target_freq_idx(struct exynos5_dmc *dmc,
* This function switches between these banks according to the
* currently used clock source.
*/
-static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
+static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
{
unsigned int reg;
int ret;
ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
+ if (ret)
+ return ret;
if (set)
reg |= EXYNOS5_TIMING_SET_SWI;
@@ -283,6 +285,8 @@ static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
reg &= ~EXYNOS5_TIMING_SET_SWI;
regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
+
+ return 0;
}
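The conversion above turns a void helper into one that propagates errors, because the function performs a read-modify-write on CDREX_LPDDR3PHY_CON3: an unchecked regmap_read() failure would leave the local variable uninitialized and write garbage back. A minimal sketch of the pattern (map, REG_OFFSET and SOME_BIT are illustrative names, not from this driver):

	unsigned int reg;
	int ret;

	ret = regmap_read(map, REG_OFFSET, &reg);	/* reg is undefined on failure */
	if (ret)
		return ret;				/* do not write garbage back */

	reg |= SOME_BIT;
	return regmap_write(map, REG_OFFSET, reg);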
/**
@@ -516,7 +520,7 @@ exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
/*
* Delays are long enough, so use them for the new coming clock.
*/
- exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
+ ret = exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
return ret;
}
@@ -577,7 +581,9 @@ exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
clk_set_rate(dmc->fout_bpll, target_rate);
- exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+ ret = exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+ if (ret)
+ goto disable_clocks;
ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
if (ret)
@@ -1392,7 +1398,7 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
return PTR_ERR(dmc->base_drexi1);
dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
- "samsung,syscon-clk");
+ "samsung,syscon-clk");
if (IS_ERR(dmc->clk_regmap))
return PTR_ERR(dmc->clk_regmap);
@@ -1471,7 +1477,6 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
exynos5_dmc_df_profile.polling_ms = 500;
}
-
dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&dmc->gov_data);
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index fbfbaada61a2..9f0a96bf9ccc 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -36,3 +36,17 @@ config TEGRA124_EMC
Tegra124 chips. The EMC controls the external DRAM on the board.
This driver is required to change memory timings / clock rate for
external memory.
+
+config TEGRA210_EMC_TABLE
+ bool
+ depends on ARCH_TEGRA_210_SOC
+
+config TEGRA210_EMC
+ tristate "NVIDIA Tegra210 External Memory Controller driver"
+ depends on TEGRA_MC && ARCH_TEGRA_210_SOC
+ select TEGRA210_EMC_TABLE
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra210 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 529d10bc5650..6c1a2ecc6628 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -13,5 +13,9 @@ obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o
obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
+obj-$(CONFIG_TEGRA210_EMC_TABLE) += tegra210-emc-table.o
+obj-$(CONFIG_TEGRA210_EMC) += tegra210-emc.o
obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o tegra186-emc.o
obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra186-emc.o
+
+tegra210-emc-y := tegra210-emc-core.o tegra210-emc-cc-r21021.o
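The last line uses the kbuild composite-object idiom: when CONFIG_TEGRA210_EMC is enabled, kbuild links the listed objects into a single tegra210-emc.o (built-in or module). A generic sketch of the pattern, with hypothetical names:

	obj-$(CONFIG_FOO) += foo.o
	foo-y := foo-core.o foo-extra.o		# foo.o is linked from these objects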
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 957c6eb74ff9..afa3ba45c9e6 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -34,6 +34,7 @@
#define MC_EMEM_ARB_TIMING_W2W 0xbc
#define MC_EMEM_ARB_TIMING_R2W 0xc0
#define MC_EMEM_ARB_TIMING_W2R 0xc4
+#define MC_EMEM_ARB_MISC2 0xc8
#define MC_EMEM_ARB_DA_TURNS 0xd0
#define MC_EMEM_ARB_DA_COVERS 0xd4
#define MC_EMEM_ARB_MISC0 0xd8
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 33b8216bac30..ba5cb1f4dfc2 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -984,6 +984,7 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra124-emc" },
+ { .compatible = "nvidia,tegra132-emc" },
{}
};
@@ -1178,11 +1179,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, emc,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
&tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index 97f26bc77ad4..8478f59db432 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -185,7 +185,7 @@ static int tegra186_emc_probe(struct platform_device *pdev)
if (IS_ERR(emc->clk)) {
err = PTR_ERR(emc->clk);
dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
- return err;
+ goto put_bpmp;
}
platform_set_drvdata(pdev, emc);
@@ -201,7 +201,7 @@ static int tegra186_emc_probe(struct platform_device *pdev)
err = tegra_bpmp_transfer(emc->bpmp, &msg);
if (err < 0) {
dev_err(&pdev->dev, "failed to get EMC DVFS pairs: %d\n", err);
- return err;
+ goto put_bpmp;
}
emc->debugfs.min_rate = ULONG_MAX;
@@ -211,8 +211,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
emc->dvfs = devm_kmalloc_array(&pdev->dev, emc->num_dvfs,
sizeof(*emc->dvfs), GFP_KERNEL);
- if (!emc->dvfs)
- return -ENOMEM;
+ if (!emc->dvfs) {
+ err = -ENOMEM;
+ goto put_bpmp;
+ }
dev_dbg(&pdev->dev, "%u DVFS pairs:\n", emc->num_dvfs);
@@ -237,15 +239,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
"failed to set rate range [%lu-%lu] for %pC\n",
emc->debugfs.min_rate, emc->debugfs.max_rate,
emc->clk);
- return err;
+ goto put_bpmp;
}
emc->debugfs.root = debugfs_create_dir("emc", NULL);
- if (!emc->debugfs.root) {
- dev_err(&pdev->dev, "failed to create debugfs directory\n");
- return 0;
- }
-
debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
emc, &tegra186_emc_debug_available_rates_fops);
debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
@@ -254,6 +251,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
emc, &tegra186_emc_debug_max_rate_fops);
return 0;
+
+put_bpmp:
+ tegra_bpmp_put(emc->bpmp);
+ return err;
}
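The probe error paths above switch from direct returns to a single put_bpmp label because every failure after acquiring the BPMP handle must drop that reference. A reduced sketch of the shape (do_step() stands in for the individual probe steps):

	emc->bpmp = tegra_bpmp_get(&pdev->dev);
	if (IS_ERR(emc->bpmp))
		return PTR_ERR(emc->bpmp);	/* nothing to unwind yet */

	err = do_step(emc);
	if (err < 0)
		goto put_bpmp;			/* was: return err (leaked the reference) */

	return 0;

	put_bpmp:
		tegra_bpmp_put(emc->bpmp);
		return err;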
static int tegra186_emc_remove(struct platform_device *pdev)
@@ -267,10 +268,10 @@ static int tegra186_emc_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra186_emc_of_match[] = {
-#if defined(CONFIG_ARCH_TEGRA186_SOC)
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
{ .compatible = "nvidia,tegra186-emc" },
#endif
-#if defined(CONFIG_ARCH_TEGRA194_SOC)
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
{ .compatible = "nvidia,tegra194-emc" },
#endif
{ /* sentinel */ }
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 5d53f11ca7b6..e25c954dde2e 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -1570,12 +1570,12 @@ static const struct of_device_id tegra186_mc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra186_mc_of_match);
-static int tegra186_mc_suspend(struct device *dev)
+static int __maybe_unused tegra186_mc_suspend(struct device *dev)
{
return 0;
}
-static int tegra186_mc_resume(struct device *dev)
+static int __maybe_unused tegra186_mc_resume(struct device *dev)
{
struct tegra186_mc *mc = dev_get_drvdata(dev);
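Annotating the suspend/resume callbacks with __maybe_unused keeps the build warning-free in configurations where they end up unreferenced, without wrapping their definitions in #ifdef CONFIG_PM_SLEEP. The usual pairing, as a generic sketch with hypothetical names:

	static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
	static int __maybe_unused foo_resume(struct device *dev) { return 0; }

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);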
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index b16715e9515d..027f46287dbf 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -7,11 +7,11 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
-#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -144,7 +144,6 @@ struct emc_timing {
struct tegra_emc {
struct device *dev;
- struct completion clk_handshake_complete;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@ -162,17 +161,13 @@ struct tegra_emc {
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 status;
status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
if (!status)
return IRQ_NONE;
- /* notify about EMC-CAR handshake completion */
- if (status & EMC_CLKCHANGE_COMPLETE_INT)
- complete(&emc->clk_handshake_complete);
-
/* notify about HW problem */
if (status & EMC_REFRESH_OVERFLOW_INT)
dev_err_ratelimited(emc->dev,
@@ -224,14 +219,13 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
/* wait until programming has settled */
readl_relaxed(emc->regs + emc_timing_registers[i - 1]);
- reinit_completion(&emc->clk_handshake_complete);
-
return 0;
}
static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
{
- unsigned long timeout;
+ int err;
+ u32 v;
dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
@@ -242,11 +236,12 @@ static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
return 0;
}
- timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- msecs_to_jiffies(100));
- if (timeout == 0) {
- dev_err(emc->dev, "EMC-CAR handshake failed\n");
- return -EIO;
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
}
return 0;
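The conversion above replaces the interrupt-driven completion with a busy poll of EMC_INTSTATUS. readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h> re-reads the register every delay_us microseconds until the condition holds or timeout_us expires, returning 0 on success or -ETIMEDOUT; the _atomic variant uses udelay()-based waiting and is therefore safe in contexts that may not sleep, unlike wait_for_completion_timeout(). General shape of a call (base, STATUS and DONE are illustrative names):

	u32 value;
	int err;

	err = readl_relaxed_poll_timeout_atomic(base + STATUS, value,
						value & DONE, 1, 100);
	if (err)
		return err;	/* handshake did not complete within 100 us */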
@@ -412,7 +407,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 emc_cfg, emc_dbg;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -647,11 +642,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
@@ -686,7 +681,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- init_completion(&emc->clk_handshake_complete);
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
new file mode 100644
index 000000000000..ff55a17896fa
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -0,0 +1,1775 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/*
+ * Enable flags for specifying verbosity.
+ */
+#define INFO (1 << 0)
+#define STEPS (1 << 1)
+#define SUB_STEPS (1 << 2)
+#define PRELOCK (1 << 3)
+#define PRELOCK_STEPS (1 << 4)
+#define ACTIVE_EN (1 << 5)
+#define PRAMP_UP (1 << 6)
+#define PRAMP_DN (1 << 7)
+#define EMA_WRITES (1 << 10)
+#define EMA_UPDATES (1 << 11)
+#define PER_TRAIN (1 << 16)
+#define CC_PRINT (1 << 17)
+#define CCFIFO (1 << 29)
+#define REGS (1 << 30)
+#define REG_LISTS (1 << 31)
+
+#define emc_dbg(emc, flags, ...) dev_dbg(emc->dev, __VA_ARGS__)
+
+#define DVFS_CLOCK_CHANGE_VERSION 21021
+#define EMC_PRELOCK_VERSION 2101
+
+enum {
+ DVFS_SEQUENCE = 1,
+ WRITE_TRAINING_SEQUENCE = 2,
+ PERIODIC_TRAINING_SEQUENCE = 3,
+ DVFS_PT1 = 10,
+ DVFS_UPDATE = 11,
+ TRAINING_PT1 = 12,
+ TRAINING_UPDATE = 13,
+ PERIODIC_TRAINING_UPDATE = 14
+};
+
+/*
+ * PTFV defines - basically just indexes into the per table PTFV array.
+ */
+#define PTFV_DQSOSC_MOVAVG_C0D0U0_INDEX 0
+#define PTFV_DQSOSC_MOVAVG_C0D0U1_INDEX 1
+#define PTFV_DQSOSC_MOVAVG_C0D1U0_INDEX 2
+#define PTFV_DQSOSC_MOVAVG_C0D1U1_INDEX 3
+#define PTFV_DQSOSC_MOVAVG_C1D0U0_INDEX 4
+#define PTFV_DQSOSC_MOVAVG_C1D0U1_INDEX 5
+#define PTFV_DQSOSC_MOVAVG_C1D1U0_INDEX 6
+#define PTFV_DQSOSC_MOVAVG_C1D1U1_INDEX 7
+#define PTFV_DVFS_SAMPLES_INDEX 9
+#define PTFV_MOVAVG_WEIGHT_INDEX 10
+#define PTFV_CONFIG_CTRL_INDEX 11
+
+#define PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA (1 << 0)
+
+/*
+ * Do arithmetic in fixed point.
+ */
+#define MOVAVG_PRECISION_FACTOR 100
+
+/*
+ * The division portion of the average operation.
+ */
+#define __AVERAGE_PTFV(dev) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] = \
+ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+/*
+ * Convert val to fixed point and add it to the temporary average.
+ */
+#define __INCREMENT_PTFV(dev, val) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] += \
+ ((val) * MOVAVG_PRECISION_FACTOR); })
+
+/*
+ * Convert a moving average back to integral form and return the value.
+ */
+#define __MOVAVG_AC(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ MOVAVG_PRECISION_FACTOR)
+
+/* Weighted update. */
+#define __WEIGHTED_UPDATE_PTFV(dev, nval) \
+ do { \
+ int w = PTFV_MOVAVG_WEIGHT_INDEX; \
+ int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX; \
+ \
+ next->ptfv_list[dqs] = \
+ ((nval * MOVAVG_PRECISION_FACTOR) + \
+ (next->ptfv_list[dqs] * \
+ next->ptfv_list[w])) / \
+ (next->ptfv_list[w] + 1); \
+ \
+ emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n", \
+ __stringify(dev), nval, next->ptfv_list[dqs]); \
+ } while (0)
+
+/* Access a particular average. */
+#define __MOVAVG(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX])
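These macros implement a scaled-integer moving average: samples are multiplied by MOVAVG_PRECISION_FACTOR on the way in, so the integer divisions lose less than 1/100 of a unit. A standalone arithmetic sketch of the same scheme, with illustrative values:

	#include <stdio.h>

	#define PRECISION 100	/* plays the role of MOVAVG_PRECISION_FACTOR */

	int main(void)
	{
		unsigned int samples[] = { 103, 97, 101 };
		unsigned int acc = 0, ema, w = 7, i;

		for (i = 0; i < 3; i++)
			acc += samples[i] * PRECISION;	/* cf. __INCREMENT_PTFV */
		acc /= 3;				/* cf. __AVERAGE_PTFV */
		printf("avg: %u.%02u\n", acc / PRECISION, acc % PRECISION);

		/* cf. __WEIGHTED_UPDATE_PTFV: new = (x*P + old*w) / (w + 1) */
		ema = (110 * PRECISION + acc * w) / (w + 1);
		printf("ema: %u.%02u\n", ema / PRECISION, ema % PRECISION);
		return 0;
	}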
+
+static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type)
+{
+ bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE;
+ struct tegra210_emc_timing *last = emc->last;
+ struct tegra210_emc_timing *next = emc->next;
+ u32 last_timing_rate_mhz = last->rate / 1000;
+ u32 next_timing_rate_mhz = next->rate / 1000;
+ bool dvfs_update = type == DVFS_UPDATE;
+ s32 tdel = 0, tmdel = 0, adel = 0;
+ bool dvfs_pt1 = type == DVFS_PT1;
+ unsigned long cval = 0;
+ u32 temp[2][2], value;
+ unsigned int i;
+
+ /*
+ * Dev0 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 2, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev0 LSB.
+ */
+ value = tegra210_emc_mrr_read(emc, 2, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U0] -
+ __MOVAVG_AC(next, C0D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U0] =
+ __MOVAVG_AC(next, C0D0U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U1] -
+ __MOVAVG_AC(next, C0D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U1] =
+ __MOVAVG_AC(next, C0D0U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U0] -
+ __MOVAVG_AC(next, C1D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U0] =
+ __MOVAVG_AC(next, C1D0U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U1] -
+ __MOVAVG_AC(next, C1D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U1] =
+ __MOVAVG_AC(next, C1D0U1);
+ }
+ }
+
+ if (emc->num_devices < 2)
+ goto done;
+
+ /*
+ * Dev1 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 1, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev1 LSB.
+ */
+ value = tegra210_emc_mrr_read(emc, 2, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U0] -
+ __MOVAVG_AC(next, C0D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U0] =
+ __MOVAVG_AC(next, C0D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U1] -
+ __MOVAVG_AC(next, C0D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U1] =
+ __MOVAVG_AC(next, C0D1U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U0] -
+ __MOVAVG_AC(next, C1D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U0] =
+ __MOVAVG_AC(next, C1D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U1] -
+ __MOVAVG_AC(next, C1D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U1] =
+ __MOVAVG_AC(next, C1D1U1);
+ }
+ }
+
+done:
+ return adel;
+}
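update_clock_tree_delay() repeats one measurement pattern eight times, once per channel/device/byte combination. Reduced to a single instance, with illustrative variable names, the pattern is:

	/* MR19 returns the high bytes and MR18 the low bytes of the DQS
	 * oscillator counts, one channel per 16 bits of the MRR word;
	 * osc_count is the reassembled 16-bit count for one lane. */
	cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
	cval *= 1000000;
	cval /= last_rate_mhz * 2 * osc_count;

	/* Only commit the averaged value when the drift, rescaled to the
	 * next frequency, exceeds the table's tree_margin. */
	tdel = current_value - averaged_value;
	tmdel = abs(tdel);
	if (tmdel * 128 * next_rate_mhz / 1000000 > tree_margin)
		current_value = averaged_value;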
+
+static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
+ struct tegra210_emc_timing *last,
+ struct tegra210_emc_timing *next)
+{
+#define __COPY_EMA(nt, lt, dev) \
+ ({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) * \
+ (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+ u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
+ u32 delay;
+
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay = 2 + (delay / last->rate);
+
+ if (!next->periodic_training)
+ return 0;
+
+ if (type == DVFS_SEQUENCE) {
+ if (last->periodic_training &&
+ (next->ptfv_list[PTFV_CONFIG_CTRL_INDEX] &
+ PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)) {
+ /*
+ * If the previous frequency was using periodic
+ * calibration then we can reuse the previous
+ * frequency's EMA data.
+ */
+ __COPY_EMA(next, last, C0D0U0);
+ __COPY_EMA(next, last, C0D0U1);
+ __COPY_EMA(next, last, C1D0U0);
+ __COPY_EMA(next, last, C1D0U1);
+ __COPY_EMA(next, last, C0D1U0);
+ __COPY_EMA(next, last, C0D1U1);
+ __COPY_EMA(next, last, C1D1U0);
+ __COPY_EMA(next, last, C1D1U1);
+ } else {
+ /* Reset the EMA. */
+ __MOVAVG(next, C0D0U0) = 0;
+ __MOVAVG(next, C0D0U1) = 0;
+ __MOVAVG(next, C1D0U0) = 0;
+ __MOVAVG(next, C1D0U1) = 0;
+ __MOVAVG(next, C0D1U0) = 0;
+ __MOVAVG(next, C0D1U1) = 0;
+ __MOVAVG(next, C1D1U0) = 0;
+ __MOVAVG(next, C1D1U1) = 0;
+
+ for (i = 0; i < samples; i++) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ /*
+ * Generate next sample of data.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_PT1);
+ }
+ }
+
+ /*
+ * Seems like it should be part of the
+ * 'if (last_timing->periodic_training)' conditional
+ * since it is already done for the else clause.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_UPDATE);
+ }
+
+ if (type == PERIODIC_TRAINING_SEQUENCE) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE);
+ }
+
+ return adel;
+}
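The delay computed at the top of periodic_compensation_handler() converts the oscillator run length into microseconds for udelay(): with the rate in kHz, clocks * 1000 / rate is the run time in microseconds, and the leading 2 adds a safety margin. For example, 1024 clocks at 1600000 kHz gives 1024000 / 1600000 = 0 us after integer truncation (0.64 us in reality), so the handler waits 2 us.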
+
+static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
+ u32 list[] = {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_DATA_BRLSHFT_0,
+ EMC_DATA_BRLSHFT_1
+ };
+ struct tegra210_emc_timing *last = emc->last;
+ unsigned int items = ARRAY_SIZE(list), i;
+ unsigned long delay;
+
+ if (last->periodic_training) {
+ emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");
+
+ value = emc_readl(emc, EMC_DBG);
+ emc_cfg_o = emc_readl(emc, EMC_CFG);
+ emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
+ EMC_CFG_DRAM_ACPD |
+ EMC_CFG_DRAM_CLKSTOP_PD);
+
+ /*
+ * 1. Power optimizations should be off.
+ */
+ emc_writel(emc, emc_cfg, EMC_CFG);
+
+ /* Does emc_timing_update() for above changes. */
+ tegra210_emc_dll_disable(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ emc_cfg_update = value = emc_readl(emc, EMC_CFG_UPDATE);
+ value &= ~EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK;
+ value |= (2 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_UPDATE);
+
+ /*
+ * 2. osc kick off - this assumes training and dvfs have set
+ * correct MR23.
+ */
+ tegra210_emc_start_periodic_compensation(emc);
+
+ /*
+ * 3. Let dram capture its clock tree delays.
+ */
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay /= last->rate + 1;
+ udelay(delay);
+
+ /*
+ * 4. Check delta wrt previous values (save value if margin
+ * exceeds what is set in table).
+ */
+ del = periodic_compensation_handler(emc,
+ PERIODIC_TRAINING_SEQUENCE,
+ last, last);
+
+ /*
+ * 5. Apply compensation w.r.t. trained values (if clock tree
+ * has drifted more than the set margin).
+ */
+ if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) {
+ for (i = 0; i < items; i++) {
+ value = tegra210_emc_compensate(last, list[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ list[i], value);
+ emc_writel(emc, value, list[i]);
+ }
+ }
+
+ emc_writel(emc, emc_cfg_o, EMC_CFG);
+
+ /*
+ * 6. Timing update actually applies the new trimmers.
+ */
+ tegra210_emc_timing_update(emc);
+
+ /* 6.1. Restore the UPDATE_DLL_IN_UPDATE field. */
+ emc_writel(emc, emc_cfg_update, EMC_CFG_UPDATE);
+
+ /* 6.2. Restore the DLL. */
+ tegra210_emc_dll_enable(emc);
+ }
+
+ return 0;
+}
+
+/*
+ * Do the clock change sequence.
+ */
+static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ /* state variables */
+ static bool fsp_for_next_freq;
+ /* constant configuration parameters */
+ const bool save_restore_clkstop_pd = true;
+ const u32 zqcal_before_cc_cutoff = 2400;
+ const bool cya_allow_ref_cc = false;
+ const bool cya_issue_pc_ref = false;
+ const bool opt_cc_short_zcal = true;
+ const bool ref_b4_sref_en = false;
+ const u32 tZQCAL_lpddr4 = 1000000;
+ const bool opt_short_zcal = true;
+ const bool opt_do_sw_qrst = true;
+ const u32 opt_dvfs_mode = MAN_SR;
+ /*
+ * This is the timing table for the source frequency. It does _not_
+ * necessarily correspond to the actual timing values in the EMC at the
+ * moment. If the boot BCT differs from the table then this can happen.
+ * However, we need it for accessing the dram_timings (which are not
+ * really registers) array for the current frequency.
+ */
+ struct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;
+ u32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;
+ u32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;
+ u32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;
+ u32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;
+ u32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];
+ u32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;
+ bool opt_zcal_en_cc = false, is_lpddr3 = false;
+ bool compensate_trimmer_applicable = false;
+ u32 emc_dbg, emc_cfg_pipe_clk, emc_pin;
+ u32 src_clk_period, dst_clk_period; /* in picoseconds */
+ bool shared_zq_resistor = false;
+ u32 value, dram_type;
+ u32 opt_dll_mode = 0;
+ unsigned long delay;
+ unsigned int i;
+
+ emc_dbg(emc, INFO, "Running clock change.\n");
+
+ /* XXX fake == last */
+ fake = tegra210_emc_find_timing(emc, last->rate * 1000UL);
+ fsp_for_next_freq = !fsp_for_next_freq;
+
+ value = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+ dram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
+
+ if (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))
+ shared_zq_resistor = true;
+
+ if ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&
+ last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||
+ dram_type == DRAM_TYPE_LPDDR4)
+ opt_zcal_en_cc = true;
+
+ if (dram_type == DRAM_TYPE_DDR3)
+ opt_dll_mode = tegra210_emc_get_dll_state(next);
+
+ if ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&
+ (dram_type == DRAM_TYPE_LPDDR2))
+ is_lpddr3 = true;
+
+ emc_readl(emc, EMC_CFG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG);
+
+ src_clk_period = 1000000000 / last->rate;
+ dst_clk_period = 1000000000 / next->rate;
+
+ if (dst_clk_period <= zqcal_before_cc_cutoff)
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;
+ else
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;
+
+ tZQCAL_lpddr4_fc_adj /= dst_clk_period;
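Rates here are in kHz, so 1000000000 / rate yields a clock period in picoseconds (1600000 kHz gives 625 ps). At a 625 ps destination period, which is below the 2400 ps cutoff, the adjusted ZQ calibration budget is therefore (1000000 - tFC_lpddr4) / 625 destination clocks; above the cutoff the full 1 us tZQCAL is used.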
+
+ emc_dbg = emc_readl(emc, EMC_DBG);
+ emc_pin = emc_readl(emc, EMC_PIN);
+ emc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);
+
+ emc_cfg = next->burst_regs[EMC_CFG_INDEX];
+ emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
+ EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
+ emc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;
+ emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
+
+ emc_dbg(emc, INFO, "Clock change version: %d\n",
+ DVFS_CLOCK_CHANGE_VERSION);
+ emc_dbg(emc, INFO, "DRAM type = %d\n", dram_type);
+ emc_dbg(emc, INFO, "DRAM dev #: %u\n", emc->num_devices);
+ emc_dbg(emc, INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
+ emc_dbg(emc, INFO, "DLL clksrc: 0x%08x\n", next->dll_clk_src);
+ emc_dbg(emc, INFO, "last rate: %u, next rate %u\n", last->rate,
+ next->rate);
+ emc_dbg(emc, INFO, "last period: %u, next period: %u\n",
+ src_clk_period, dst_clk_period);
+ emc_dbg(emc, INFO, " shared_zq_resistor: %d\n", !!shared_zq_resistor);
+ emc_dbg(emc, INFO, " num_channels: %u\n", emc->num_channels);
+ emc_dbg(emc, INFO, " opt_dll_mode: %d\n", opt_dll_mode);
+
+ /*
+ * Step 1:
+ * Pre DVFS SW sequence.
+ */
+ emc_dbg(emc, STEPS, "Step 1\n");
+ emc_dbg(emc, STEPS, "Step 1.1: Disable DLL temporarily.\n");
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);
+
+ emc_dbg(emc, STEPS, "Step 1.2: Disable AUTOCAL temporarily.\n");
+
+ emc_auto_cal_config = next->emc_auto_cal_config;
+ auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
+ emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
+ emc_auto_cal_config |= auto_cal_en;
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */
+
+ emc_dbg(emc, STEPS, "Step 1.3: Disable other power features.\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ if (next->periodic_training) {
+ tegra210_emc_reset_dram_clktree_values(next);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ tegra210_emc_start_periodic_compensation(emc);
+
+ delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);
+ udelay((delay / last->rate) + 2);
+
+ value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,
+ next);
+ value = (value * 128 * next->rate / 1000) / 1000000;
+
+ if (next->periodic_training && value > next->tree_margin)
+ compensate_trimmer_applicable = true;
+ }
+
+ emc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ emc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
+ EMC_CFG_PIPE_CLK);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &
+ ~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+
+ bg_reg_mode_change =
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));
+ enable_bglp_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;
+ enable_bg_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+
+ if (enable_bglp_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+ /* Check if we need to turn on VREF generator. */
+ if ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 1)) ||
+ (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {
+ u32 pad_tx_ctrl =
+ next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 last_pad_tx_ctrl =
+ last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 next_dq_e_ivref, next_dqs_e_ivref;
+
+ next_dqs_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
+ next_dq_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
+ value = (last_pad_tx_ctrl &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
+ next_dq_e_ivref | next_dqs_e_ivref;
+ emc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);
+ udelay(1);
+ } else if (bg_reg_mode_change) {
+ udelay(1);
+ }
+
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 2:
+ * Prelock the DLL.
+ */
+ emc_dbg(emc, STEPS, "Step 2\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
+ EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ emc_dbg(emc, INFO, "Prelock enabled for target frequency.\n");
+ value = tegra210_emc_dll_prelock(emc, clksrc);
+ emc_dbg(emc, INFO, "DLL out: 0x%03x\n", value);
+ } else {
+ emc_dbg(emc, INFO, "Disabling DLL for target frequency.\n");
+ tegra210_emc_dll_disable(emc);
+ }
+
+ /*
+ * Step 3:
+ * Prepare autocal for the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 3\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
+ emc_writel(emc, next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
+ emc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
+ emc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
+ emc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
+ emc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
+ emc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
+ auto_cal_en);
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /*
+ * Step 4:
+ * Update EMC_CFG. (??)
+ */
+ emc_dbg(emc, STEPS, "Step 4\n");
+
+ if (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 1, EMC_SELF_REF, 0);
+ else
+ emc_writel(emc, next->emc_cfg_2, EMC_CFG_2);
+
+ /*
+ * Step 5:
+ * Prepare reference variables for ZQCAL regs.
+ */
+ emc_dbg(emc, STEPS, "Step 5\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ zq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));
+ else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
+ zq_wait_long = max(next->min_mrs_wait,
+ div_o3(360000, dst_clk_period)) + 4;
+ else if (dram_type == DRAM_TYPE_DDR3)
+ zq_wait_long = max((u32)256,
+ div_o3(320000, dst_clk_period) + 2);
+ else
+ zq_wait_long = 0;
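A worked case, assuming div_o3() divides rounding up: for LPDDR4 at a 625 ps destination period (1600 MHz), zq_wait_long = max(1, 1000000 / 625) = 1600 destination clocks, i.e. the 1 us calibration window expressed in ticks.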
+
+ /*
+ * Step 6:
+ * Training code - removed.
+ */
+ emc_dbg(emc, STEPS, "Step 6\n");
+
+ /*
+ * Step 7:
+ * Program FSP reference registers and send MRWs to new FSPWR.
+ */
+ emc_dbg(emc, STEPS, "Step 7\n");
+ emc_dbg(emc, SUB_STEPS, "Step 7.1: Bug 200024907 - Patch RP R2P\n");
+
+ /* WAR 200024907 */
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ u32 nRTP = 16;
+
+ if (src_clk_period >= 1000000 / 1866) /* 535.91 ps */
+ nRTP = 14;
+
+ if (src_clk_period >= 1000000 / 1600) /* 625.00 ps */
+ nRTP = 12;
+
+ if (src_clk_period >= 1000000 / 1333) /* 750.19 ps */
+ nRTP = 10;
+
+ if (src_clk_period >= 1000000 / 1066) /* 938.09 ps */
+ nRTP = 8;
+
+ deltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);
+
+ /*
+ * Originally there was a + .5 in the tRPST calculation.
+ * However since we can't do FP in the kernel and the tRTM
+ * computation was in a floating point ceiling function, adding
+ * one to tRTP should be ok. There is no other source of
+ * non-integer values, so the result was always going to be
+ * something of the form: f_ceil(N + .5) = N + 1;
+ */
+ tRPST = (last->emc_mrw & 0x80) >> 7;
+ tRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +
+ max_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +
+ 1 + nRTP;
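A worked instance of the formula, again assuming div_o3() rounds up: at src_clk_period = 625 ps (i.e. 1600 MT/s), nRTP = 12, deltaTWATM = max(ceil(7500 / 625), 8) = 12, and with tRPST in {0, 1} taken from bit 7 of the previous MRW, tRTM = RL + ceil(3600 / 625) + 12 + tRPST + 1 + 12 = RL + 31 + tRPST source clocks.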
+
+ emc_dbg(emc, INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
+ next->burst_regs[EMC_RP_INDEX]);
+
+ if (last->burst_regs[EMC_RP_INDEX] < tRTM) {
+ if (tRTM > (last->burst_regs[EMC_R2P_INDEX] +
+ last->burst_regs[EMC_RP_INDEX])) {
+ R2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+
+ if (R2P_war > 63) {
+ RP_war = R2P_war +
+ last->burst_regs[EMC_RP_INDEX] - 63;
+
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+
+ R2P_war = 63;
+ }
+ } else {
+ R2P_war = last->burst_regs[EMC_R2P_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+ }
+
+ if (RP_war < deltaTWATM) {
+ W2P_war = last->burst_regs[EMC_W2P_INDEX]
+ + deltaTWATM - RP_war;
+ if (W2P_war > 63) {
+ RP_war = RP_war + W2P_war - 63;
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+ W2P_war = 63;
+ }
+ } else {
+ W2P_war = last->burst_regs[EMC_W2P_INDEX];
+ }
+
+ if ((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||
+ (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||
+ (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||
+ (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {
+ emc_writel(emc, RP_war, EMC_RP);
+ emc_writel(emc, R2P_war, EMC_R2P);
+ emc_writel(emc, W2P_war, EMC_W2P);
+ emc_writel(emc, TRPab_war, EMC_TRPAB);
+ }
+
+ tegra210_emc_timing_update(emc);
+ } else {
+ emc_dbg(emc, INFO, "Skipped WAR\n");
+ }
+ }
+
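+ /*
+ * Per the LPDDR4 spec, MR13 bit 6 selects FSP-WR (the set point
+ * targeted by mode register writes) and bit 7 selects FSP-OP (the
+ * set point in operation). mr13_flip_fspwr retargets MRWs at the
+ * set point the new frequency will use while still operating on
+ * the old one; mr13_flip_fspop is queued later to switch operation
+ * over.
+ */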
+ if (!fsp_for_next_freq) {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;
+ } else {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, mr13_flip_fspwr, EMC_MRW3);
+ emc_writel(emc, next->emc_mrw, EMC_MRW);
+ emc_writel(emc, next->emc_mrw2, EMC_MRW2);
+ }
+
+ /*
+ * Step 8:
+ * Program the shadow registers.
+ */
+ emc_dbg(emc, STEPS, "Step 8\n");
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs\n");
+
+ for (i = 0; i < next->num_burst; i++) {
+ const u16 *offsets = emc->offsets->burst;
+ u16 offset;
+
+ if (!offsets[i])
+ continue;
+
+ value = next->burst_regs[i];
+ offset = offsets[i];
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (offset == EMC_MRW6 || offset == EMC_MRW7 ||
+ offset == EMC_MRW8 || offset == EMC_MRW9 ||
+ offset == EMC_MRW10 || offset == EMC_MRW11 ||
+ offset == EMC_MRW12 || offset == EMC_MRW13 ||
+ offset == EMC_MRW14 || offset == EMC_MRW15 ||
+ offset == EMC_TRAINING_CTRL))
+ continue;
+
+ /* Pain... And suffering: these registers need special-casing. */
+ if (offset == EMC_CFG) {
+ value &= ~EMC_CFG_DRAM_ACPD;
+ value &= ~EMC_CFG_DYN_SELF_REF;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value &= ~EMC_CFG_DRAM_CLKSTOP_SR;
+ value &= ~EMC_CFG_DRAM_CLKSTOP_PD;
+ }
+ } else if (offset == EMC_MRS_WAIT_CNT &&
+ dram_type == DRAM_TYPE_LPDDR2 &&
+ opt_zcal_en_cc && !opt_cc_short_zcal &&
+ opt_short_zcal) {
+ value = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
+ ((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
+ } else if (offset == EMC_ZCAL_WAIT_CNT &&
+ dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
+ !opt_cc_short_zcal && opt_short_zcal) {
+ value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
+ EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
+ ((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
+ EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT);
+ } else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
+ value = 0; /* EMC_ZCAL_INTERVAL reset value. */
+ } else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {
+ value |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
+ } else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {
+ value &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {
+ value |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ value &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {
+ value &= 0xf800f800;
+ } else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {
+ value &= 0xfffffff0;
+ }
+
+ emc_writel(emc, value, offset);
+ }
+
+ /* SW addition: do EMC refresh adjustment here. */
+ tegra210_emc_adjust_timing(emc, next);
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (23 << EMC_MRW_MRW_MA_SHIFT) |
+ (next->run_clocks & EMC_MRW_MRW_OP_MASK);
+ emc_writel(emc, value, EMC_MRW);
+ }
+
+ /* Per channel burst registers. */
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs_per_ch\n");
+
+ for (i = 0; i < next->num_burst_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *burst =
+ emc->offsets->burst_per_channel;
+
+ if (!burst[i].offset)
+ continue;
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (burst[i].offset == EMC_MRW6 ||
+ burst[i].offset == EMC_MRW7 ||
+ burst[i].offset == EMC_MRW8 ||
+ burst[i].offset == EMC_MRW9 ||
+ burst[i].offset == EMC_MRW10 ||
+ burst[i].offset == EMC_MRW11 ||
+ burst[i].offset == EMC_MRW12 ||
+ burst[i].offset == EMC_MRW13 ||
+ burst[i].offset == EMC_MRW14 ||
+ burst[i].offset == EMC_MRW15))
+ continue;
+
+ /* Filter out second channel if not in DUAL_CHANNEL mode. */
+ if (emc->num_channels < 2 && burst[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->burst_reg_per_ch[i], burst[i].offset);
+ emc_channel_writel(emc, burst[i].bank,
+ next->burst_reg_per_ch[i],
+ burst[i].offset);
+ }
+
+ /* Vref regs. */
+ emc_dbg(emc, SUB_STEPS, "Writing vref_regs\n");
+
+ for (i = 0; i < next->vref_num; i++) {
+ const struct tegra210_emc_per_channel_regs *vref =
+ emc->offsets->vref_per_channel;
+
+ if (!vref[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && vref[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->vref_perch_regs[i], vref[i].offset);
+ emc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],
+ vref[i].offset);
+ }
+
+ /* Trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs\n");
+
+ for (i = 0; i < next->num_trim; i++) {
+ const u16 *offsets = emc->offsets->trim;
+
+ if (!offsets[i])
+ continue;
+
+ if (compensate_trimmer_applicable &&
+ (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offsets[i] == EMC_DATA_BRLSHFT_0 ||
+ offsets[i] == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offsets[i]);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offsets[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ (u32)(u64)offsets[i], value);
+ emc_writel(emc, value, offsets[i]);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_regs[i], offsets[i]);
+ emc_writel(emc, next->trim_regs[i], offsets[i]);
+ }
+ }
+
+ /* Per channel trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs_per_ch\n");
+
+ for (i = 0; i < next->num_trim_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *trim =
+ &emc->offsets->trim_per_channel[0];
+ unsigned int offset;
+
+ if (!trim[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && trim[i].bank >= 1)
+ continue;
+
+ offset = trim[i].offset;
+
+ if (compensate_trimmer_applicable &&
+ (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offset == EMC_DATA_BRLSHFT_0 ||
+ offset == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offset);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offset);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", offset,
+ value);
+ emc_channel_writel(emc, trim[i].bank, value, offset);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_perch_regs[i], offset);
+ emc_channel_writel(emc, trim[i].bank,
+ next->trim_perch_regs[i], offset);
+ }
+ }
+
+ emc_dbg(emc, SUB_STEPS, "Writing burst_mc_regs\n");
+
+ for (i = 0; i < next->num_mc_regs; i++) {
+ const u16 *offsets = emc->offsets->burst_mc;
+ u32 *values = next->burst_mc_regs;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ values[i], offsets[i]);
+ mc_writel(emc->mc, values[i], offsets[i]);
+ }
+
+ /* Registers to be programmed on the faster clock. */
+ if (next->rate < last->rate) {
+ const u16 *la = emc->offsets->la_scale;
+
+ emc_dbg(emc, SUB_STEPS, "Writing la_scale_regs\n");
+
+ for (i = 0; i < next->num_up_down; i++) {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->la_scale_regs[i], la[i]);
+ mc_writel(emc->mc, next->la_scale_regs[i], la[i]);
+ }
+ }
+
+ /* Flush all the burst register writes. */
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ /*
+ * Step 9:
+ * LPDDR4 section A.
+ */
+ emc_dbg(emc, STEPS, "Step 9\n");
+
+ value = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
+ value &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, value, EMC_ZCAL_WAIT_CNT);
+
+ value = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |
+ EMC_DBG_WRITE_ACTIVE_ONLY);
+
+ emc_writel(emc, value, EMC_DBG);
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, emc_dbg, EMC_DBG);
+ }
+
+ /*
+ * Step 10:
+ * LPDDR4 and DDR3 common section.
+ */
+ emc_dbg(emc, STEPS, "Step 10\n");
+
+ if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);
+ else
+ ccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);
+
+ if (dram_type == DRAM_TYPE_LPDDR4 &&
+ dst_clk_period <= zqcal_before_cc_cutoff) {
+ ccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &
+ 0xFFFF3F3F) |
+ (last->burst_regs[EMC_MRW6_INDEX] &
+ 0x0000C0C0), EMC_MRW6, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &
+ 0xFFFF0707) |
+ (last->burst_regs[EMC_MRW14_INDEX] &
+ 0x00003838), EMC_MRW14, 0);
+
+ if (emc->num_devices > 1) {
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW7_INDEX] &
+ 0xFFFF3F3F) |
+ (last->burst_regs[EMC_MRW7_INDEX] &
+ 0x0000C0C0), EMC_MRW7, 0);
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW15_INDEX] &
+ 0xFFFF0707) |
+ (last->burst_regs[EMC_MRW15_INDEX] &
+ 0x00003838), EMC_MRW15, 0);
+ }
+
+ if (opt_zcal_en_cc) {
+ if (emc->num_devices < 2)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else if (shared_zq_resistor)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else
+ ccfifo_writel(emc,
+ EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (1000 * fake->dram_timings[T_RP]) / src_clk_period;
+ ccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);
+ ccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
+ delay = 30;
+
+ if (cya_allow_ref_cc) {
+ delay += (1000 * fake->dram_timings[T_RP]) /
+ src_clk_period;
+ delay += 4000 * fake->dram_timings[T_RFC];
+ }
+
+ ccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |
+ EMC_PIN_PIN_CKEB |
+ EMC_PIN_PIN_CKE),
+ EMC_PIN, delay);
+ }
+
+ /*
+ * Calculate the reference delay multiplier: each enabled refresh
+ * option below adds one tRP + tRFC worth of clocks to the delay
+ * ahead of the EMC_CFG_SYNC write.
+ */
+ value = 1;
+
+ if (ref_b4_sref_en)
+ value++;
+
+ if (cya_allow_ref_cc)
+ value++;
+
+ if (cya_issue_pc_ref)
+ value++;
+
+ if (dram_type != DRAM_TYPE_LPDDR4) {
+ delay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +
+ (1000 * fake->dram_timings[T_RFC] / src_clk_period));
+ delay = value * delay + 20;
+ } else {
+ delay = 0;
+ }
+
+ /*
+ * Step 11:
+ * Ramp down.
+ */
+ emc_dbg(emc, STEPS, "Step 11\n");
+
+ ccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);
+
+ value = emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ ramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,
+ 0);
+
+ /*
+ * Step 12:
+ * And finally - trigger the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 12\n");
+
+ ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
+ value &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ /*
+ * Step 13:
+ * Ramp up.
+ */
+ emc_dbg(emc, STEPS, "Step 13\n");
+
+ ramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+
+ /*
+ * Step 14:
+ * Bring up the CKE pins.
+ */
+ emc_dbg(emc, STEPS, "Step 14\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = emc_pin | EMC_PIN_PIN_CKE;
+
+ if (emc->num_devices <= 1)
+ value &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);
+ else
+ value |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;
+
+ ccfifo_writel(emc, value, EMC_PIN, 0);
+ }
+
+ /*
+ * Step 15 (the reference sequence lists two step 15s):
+ * Calculate zqlatch wait time; has dependency on ramping times.
+ */
+ emc_dbg(emc, STEPS, "Step 15\n");
+
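+ /*
+ * If the ZQ calibration command was issued before the clock change
+ * (fast destination clocks), the ramp times already count toward
+ * tZQCAL and only the remainder needs to be waited out; otherwise
+ * the latch has to wait tZQCAL minus the power-down exit time
+ * tPDEX.
+ */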
+ if (dst_clk_period <= zqcal_before_cc_cutoff) {
+ s32 t = (s32)(ramp_up_wait + ramp_down_wait) /
+ (s32)dst_clk_period;
+ zq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;
+ } else {
+ zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
+ div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+ }
+
+ emc_dbg(emc, INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
+ emc_dbg(emc, INFO, "dst_clk_period = %u\n",
+ dst_clk_period);
+ emc_dbg(emc, INFO, "next->dram_timings[T_PDEX] = %u\n",
+ next->dram_timings[T_PDEX]);
+ emc_dbg(emc, INFO, "zq_latch_dvfs_wait_time = %d\n",
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+
+ if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
+ delay = div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+
+ if (emc->num_devices < 2) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ } else if (shared_zq_resistor) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time) +
+ delay);
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL, 0);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, 0);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ tZQCAL_lpddr4 / dst_clk_period);
+ } else {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ }
+ }
+
+ /* WAR: delay for zqlatch */
+ ccfifo_writel(emc, 0, 0, 10);
+
+ /*
+ * Step 16:
+ * LPDDR4 Conditional Training Kickoff. Removed.
+ */
+
+ /*
+ * Step 17:
+ * MANSR exit self refresh.
+ */
+ emc_dbg(emc, STEPS, "Step 17\n");
+
+ if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+
+ /*
+ * Step 18:
+ * Send MRWs to LPDDR2/LPDDR3 and DDR3.
+ */
+ emc_dbg(emc, STEPS, "Step 18\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ ccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);
+ ccfifo_writel(emc, next->emc_mrw, EMC_MRW, 0);
+ if (is_lpddr3)
+ ccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ if (opt_dll_mode)
+ ccfifo_writel(emc, next->emc_emrs &
+ ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
+ ccfifo_writel(emc, next->emc_emrs2 &
+ ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
+ ccfifo_writel(emc, next->emc_mrs |
+ EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
+ }
+
+ /*
+ * Step 19:
+ * ZQCAL for LPDDR2/LPDDR3 and DDR3.
+ */
+ emc_dbg(emc, STEPS, "Step 19\n");
+
+ if (opt_zcal_en_cc) {
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ value = opt_cc_short_zcal ? 90000 : 360000;
+ value = div_o3(value, dst_clk_period);
+ value = value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
+ value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);
+
+ value = opt_cc_short_zcal ? 0x56 : 0xab;
+ ccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT,
+ EMC_MRW, 0);
+
+ if (emc->num_devices > 1) {
+ value = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRW, 0);
+ }
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ value = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;
+
+ ccfifo_writel(emc, value |
+ 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ 0);
+
+ if (emc->num_devices > 1) {
+ value = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD;
+ ccfifo_writel(emc, value, EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
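+ /*
+ * If the body-bias regulator mode changes, pad the write so that
+ * roughly 1.25 us (1250000 ps) have elapsed since the start of
+ * the ramp up before the new bias setting takes effect.
+ */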
+ if (bg_reg_mode_change) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+
+ if (ramp_up_wait <= 1250000)
+ delay = (1250000 - ramp_up_wait) / dst_clk_period;
+ else
+ delay = 0;
+
+ ccfifo_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],
+ EMC_PMACRO_BG_BIAS_CTRL_0, delay);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 20:
+ * Issue ref and optional QRST.
+ */
+ emc_dbg(emc, STEPS, "Step 20\n");
+
+ if (dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ if (opt_do_sw_qrst) {
+ ccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);
+ ccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);
+ }
+
+ /*
+ * Step 21:
+ * Restore ZCAL and ZCAL interval.
+ */
+ emc_dbg(emc, STEPS, "Step 21\n");
+
+ if (save_restore_clkstop_pd || opt_zcal_en_cc) {
+ ccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ EMC_DBG, 0);
+ if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL, 0);
+
+ if (save_restore_clkstop_pd)
+ ccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &
+ ~EMC_CFG_DYN_SELF_REF,
+ EMC_CFG, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+ }
+
+ /*
+ * Step 22:
+ * Restore EMC_CFG_PIPE_CLK.
+ */
+ emc_dbg(emc, STEPS, "Step 22\n");
+
+ ccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ else
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+ /*
+ * Step 23:
+ */
+ emc_dbg(emc, STEPS, "Step 23\n");
+
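+ /*
+ * Stall traffic and disable the DLL while the clock source is
+ * switched; step 30 re-enables it if the new timing uses the DLL.
+ */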
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_do_clock_change(emc, clksrc);
+
+ /*
+ * Step 24:
+ * Save training results. Removed.
+ */
+
+ /*
+ * Step 25:
+ * Program MC updown registers.
+ */
+ emc_dbg(emc, STEPS, "Step 25\n");
+
+ if (next->rate > last->rate) {
+ for (i = 0; i < next->num_up_down; i++)
+ mc_writel(emc->mc, next->la_scale_regs[i],
+ emc->offsets->la_scale[i]);
+
+ tegra210_emc_timing_update(emc);
+ }
+
+ /*
+ * Step 26:
+ * Restore ZCAL registers.
+ */
+ emc_dbg(emc, STEPS, "Step 26\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ if (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&
+ !opt_short_zcal && opt_cc_short_zcal) {
+ udelay(2);
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ if (dram_type == DRAM_TYPE_LPDDR2)
+ emc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],
+ EMC_MRS_WAIT_CNT);
+ else if (dram_type == DRAM_TYPE_DDR3)
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 27:
+ * Restore EMC_CFG, FDPD registers.
+ */
+ emc_dbg(emc, STEPS, "Step 27\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+ emc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+
+ /*
+ * Step 28:
+ * Training recover. Removed.
+ */
+ emc_dbg(emc, STEPS, "Step 28\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
+ EMC_PMACRO_AUTOCAL_CFG_COMMON);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 29:
+ * Power fix WAR.
+ */
+ emc_dbg(emc, STEPS, "Step 29\n");
+
+ emc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
+ EMC_PMACRO_CFG_PM_GLOBAL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_1);
+ emc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);
+
+ /*
+ * Step 30:
+ * Re-enable autocal.
+ */
+ emc_dbg(emc, STEPS, "Step 30: Re-enable DLL and AUTOCAL\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+ }
+
+ emc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /* Done! Yay. */
+}
+
+const struct tegra210_emc_sequence tegra210_emc_r21021 = {
+ .revision = 0x7,
+ .set_clock = tegra210_emc_r21021_set_clock,
+ .periodic_compensation = tegra210_emc_r21021_periodic_compensation,
+};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
new file mode 100644
index 000000000000..cdd663ba4733
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -0,0 +1,2100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define EMC_CLK_EMC_2X_CLK_SRC_SHIFT 29
+#define EMC_CLK_EMC_2X_CLK_SRC_MASK \
+ (0x7 << EMC_CLK_EMC_2X_CLK_SRC_SHIFT)
+#define EMC_CLK_SOURCE_PLLM_LJ 0x4
+#define EMC_CLK_SOURCE_PLLMB_LJ 0x5
+#define EMC_CLK_FORCE_CC_TRIGGER BIT(27)
+#define EMC_CLK_MC_EMC_SAME_FREQ BIT(16)
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT 0
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_MASK \
+ (0xff << EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT)
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL */
+#define DLL_CLK_EMC_DLL_CLK_SRC_SHIFT 29
+#define DLL_CLK_EMC_DLL_CLK_SRC_MASK \
+ (0x7 << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT)
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT 10
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK \
+ (0x3 << DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT)
+#define PLLM_VCOA 0
+#define PLLM_VCOB 1
+#define EMC_DLL_SWITCH_OUT 2
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT 0
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK \
+ (0xff << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT)
+
+/* MC_EMEM_ARB_MISC0 */
+#define MC_EMEM_ARB_MISC0_EMC_SAME_FREQ BIT(27)
+
+/* EMC_DATA_BRLSHFT_X */
+#define EMC0_EMC_DATA_BRLSHFT_0_INDEX 2
+#define EMC1_EMC_DATA_BRLSHFT_0_INDEX 3
+#define EMC0_EMC_DATA_BRLSHFT_1_INDEX 4
+#define EMC1_EMC_DATA_BRLSHFT_1_INDEX 5
+
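+/*
+ * TRIM_REG(chan, rank, reg, byte) recovers the effective output delay for
+ * one byte lane: the long DDLL trim taps from trim_regs plus 64 taps for
+ * each coarse barrel-shifter step from trim_perch_regs.
+ */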
+#define TRIM_REG(chan, rank, reg, byte) \
+ (((EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _MASK & \
+ next->trim_regs[EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## \
+ rank ## _ ## reg ## _INDEX]) >> \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _SHIFT) \
+ + \
+ (((EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_MASK & \
+ next->trim_perch_regs[EMC ## chan ## \
+ _EMC_DATA_BRLSHFT_ ## rank ## _INDEX]) >> \
+ EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_SHIFT) * 64))
+
+#define CALC_TEMP(rank, reg, byte1, byte2, n) \
+ (((new[n] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## \
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _MASK) \
+ | \
+ ((new[n + 1] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ##\
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _MASK))
+
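+/*
+ * Scale the low 16 bits of a refresh register by the given factor while
+ * preserving the top 16 bits.
+ */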
+#define REFRESH_SPEEDUP(value, speedup) \
+ (((value) & 0xffff0000) | ((value) & 0xffff) * (speedup))
+
+#define LPDDR2_MR4_SRR GENMASK(2, 0)
+
+static const struct tegra210_emc_sequence *tegra210_emc_sequences[] = {
+ &tegra210_emc_r21021,
+};
+
+static const struct tegra210_emc_table_register_offsets
+tegra210_emc_table_register_offsets = {
+ .burst = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RFCPB,
+ EMC_REFCTRL2,
+ EMC_RFC_SLR,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_R2R,
+ EMC_TPPD,
+ EMC_CCDMW,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WEXT,
+ EMC_WDV_CHK,
+ EMC_WDV,
+ EMC_WSV,
+ EMC_WEV,
+ EMC_WDV_MASK,
+ EMC_WS_DURATION,
+ EMC_WE_DURATION,
+ EMC_QUSE,
+ EMC_QUSE_WIDTH,
+ EMC_IBDLY,
+ EMC_OBDLY,
+ EMC_EINPUT,
+ EMC_MRW6,
+ EMC_EINPUT_DURATION,
+ EMC_PUTERM_EXTRA,
+ EMC_PUTERM_WIDTH,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_RDV_MASK,
+ EMC_RDV_EARLY,
+ EMC_RDV_EARLY_MASK,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PRE_REFRESH_REQ_CNT,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_CKE2PDEN,
+ EMC_PDEX2CKE,
+ EMC_PDEX2MRR,
+ EMC_TXSR,
+ EMC_TXSRDLL,
+ EMC_TCKE,
+ EMC_TCKESR,
+ EMC_TPD,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_MRW7,
+ EMC_TREFBW,
+ EMC_ODT_WRITE,
+ EMC_FBIO_CFG5,
+ EMC_FBIO_CFG7,
+ EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_PERIOD,
+ EMC_PMACRO_IB_RXRT,
+ EMC_CFG_PIPE_1,
+ EMC_CFG_PIPE_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_4,
+ EMC_PMACRO_QUSE_DDLL_RANK0_5,
+ EMC_PMACRO_QUSE_DDLL_RANK1_4,
+ EMC_PMACRO_QUSE_DDLL_RANK1_5,
+ EMC_MRW8,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5,
+ EMC_PMACRO_DDLL_LONG_CMD_0,
+ EMC_PMACRO_DDLL_LONG_CMD_1,
+ EMC_PMACRO_DDLL_LONG_CMD_2,
+ EMC_PMACRO_DDLL_LONG_CMD_3,
+ EMC_PMACRO_DDLL_LONG_CMD_4,
+ EMC_PMACRO_DDLL_SHORT_CMD_0,
+ EMC_PMACRO_DDLL_SHORT_CMD_1,
+ EMC_PMACRO_DDLL_SHORT_CMD_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3,
+ EMC_TXDSRVTTGEN,
+ EMC_FDPD_CTRL_DQ,
+ EMC_FDPD_CTRL_CMD,
+ EMC_FBIO_SPARE,
+ EMC_ZCAL_INTERVAL,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_MRS_WAIT_CNT,
+ EMC_MRS_WAIT_CNT2,
+ EMC_AUTO_CAL_CHANNEL,
+ EMC_DLL_CFG_0,
+ EMC_DLL_CFG_1,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON,
+ EMC_PMACRO_ZCTRL,
+ EMC_CFG,
+ EMC_CFG_PIPE,
+ EMC_DYN_SELF_REF_CONTROL,
+ EMC_QPOP,
+ EMC_DQS_BRLSHFT_0,
+ EMC_DQS_BRLSHFT_1,
+ EMC_CMD_BRLSHFT_2,
+ EMC_CMD_BRLSHFT_3,
+ EMC_PMACRO_PAD_CFG_CTRL,
+ EMC_PMACRO_DATA_PAD_RX_CTRL,
+ EMC_PMACRO_CMD_PAD_RX_CTRL,
+ EMC_PMACRO_DATA_RX_TERM_MODE,
+ EMC_PMACRO_CMD_RX_TERM_MODE,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ EMC_PMACRO_DATA_PAD_TX_CTRL,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ EMC_PMACRO_VTTGEN_CTRL_0,
+ EMC_PMACRO_VTTGEN_CTRL_1,
+ EMC_PMACRO_VTTGEN_CTRL_2,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ EMC_PMACRO_CMD_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BRICK_CTRL_RFU2,
+ EMC_PMACRO_DATA_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BG_BIAS_CTRL_0,
+ EMC_CFG_3,
+ EMC_PMACRO_TX_PWRD_0,
+ EMC_PMACRO_TX_PWRD_1,
+ EMC_PMACRO_TX_PWRD_2,
+ EMC_PMACRO_TX_PWRD_3,
+ EMC_PMACRO_TX_PWRD_4,
+ EMC_PMACRO_TX_PWRD_5,
+ EMC_CONFIG_SAMPLE_DELAY,
+ EMC_PMACRO_TX_SEL_CLK_SRC_0,
+ EMC_PMACRO_TX_SEL_CLK_SRC_1,
+ EMC_PMACRO_TX_SEL_CLK_SRC_2,
+ EMC_PMACRO_TX_SEL_CLK_SRC_3,
+ EMC_PMACRO_TX_SEL_CLK_SRC_4,
+ EMC_PMACRO_TX_SEL_CLK_SRC_5,
+ EMC_PMACRO_DDLL_BYPASS,
+ EMC_PMACRO_DDLL_PWRD_0,
+ EMC_PMACRO_DDLL_PWRD_1,
+ EMC_PMACRO_DDLL_PWRD_2,
+ EMC_PMACRO_CMD_CTRL_0,
+ EMC_PMACRO_CMD_CTRL_1,
+ EMC_PMACRO_CMD_CTRL_2,
+ EMC_TR_TIMING_0,
+ EMC_TR_DVFS,
+ EMC_TR_CTRL_1,
+ EMC_TR_RDV,
+ EMC_TR_QPOP,
+ EMC_TR_RDV_MASK,
+ EMC_MRW14,
+ EMC_TR_QSAFE,
+ EMC_TR_QRST,
+ EMC_TRAINING_CTRL,
+ EMC_TRAINING_SETTLE,
+ EMC_TRAINING_VREF_SETTLE,
+ EMC_TRAINING_CA_FINE_CTRL,
+ EMC_TRAINING_CA_CTRL_MISC,
+ EMC_TRAINING_CA_CTRL_MISC1,
+ EMC_TRAINING_CA_VREF_CTRL,
+ EMC_TRAINING_QUSE_CORS_CTRL,
+ EMC_TRAINING_QUSE_FINE_CTRL,
+ EMC_TRAINING_QUSE_CTRL_MISC,
+ EMC_TRAINING_QUSE_VREF_CTRL,
+ EMC_TRAINING_READ_FINE_CTRL,
+ EMC_TRAINING_READ_CTRL_MISC,
+ EMC_TRAINING_READ_VREF_CTRL,
+ EMC_TRAINING_WRITE_FINE_CTRL,
+ EMC_TRAINING_WRITE_CTRL_MISC,
+ EMC_TRAINING_WRITE_VREF_CTRL,
+ EMC_TRAINING_MPC,
+ EMC_MRW15,
+ },
+ .trim = {
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_IB_VREF_DQS_0,
+ EMC_PMACRO_IB_VREF_DQS_1,
+ EMC_PMACRO_IB_VREF_DQ_0,
+ EMC_PMACRO_IB_VREF_DQ_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_0,
+ EMC_PMACRO_QUSE_DDLL_RANK0_1,
+ EMC_PMACRO_QUSE_DDLL_RANK0_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_3,
+ EMC_PMACRO_QUSE_DDLL_RANK1_0,
+ EMC_PMACRO_QUSE_DDLL_RANK1_1,
+ EMC_PMACRO_QUSE_DDLL_RANK1_2,
+ EMC_PMACRO_QUSE_DDLL_RANK1_3
+ },
+ .burst_mc = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_REFPB_HP_CTRL,
+ MC_EMEM_ARB_REFPB_BANK_CTRL,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_CCDMW,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_TIMING_RFCPB,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_MISC1,
+ MC_EMEM_ARB_MISC2,
+ MC_EMEM_ARB_RING1_THROTTLE,
+ MC_EMEM_ARB_DHYST_CTRL,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7,
+ },
+ .la_scale = {
+ MC_MLL_MPCORER_PTSA_RATE,
+ MC_FTOP_PTSA_RATE,
+ MC_PTSA_GRANT_DECREMENT,
+ MC_LATENCY_ALLOWANCE_XUSB_0,
+ MC_LATENCY_ALLOWANCE_XUSB_1,
+ MC_LATENCY_ALLOWANCE_TSEC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCA_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAA_0,
+ MC_LATENCY_ALLOWANCE_SDMMC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAB_0,
+ MC_LATENCY_ALLOWANCE_PPCS_0,
+ MC_LATENCY_ALLOWANCE_PPCS_1,
+ MC_LATENCY_ALLOWANCE_MPCORE_0,
+ MC_LATENCY_ALLOWANCE_HC_0,
+ MC_LATENCY_ALLOWANCE_HC_1,
+ MC_LATENCY_ALLOWANCE_AVPC_0,
+ MC_LATENCY_ALLOWANCE_GPU_0,
+ MC_LATENCY_ALLOWANCE_GPU2_0,
+ MC_LATENCY_ALLOWANCE_NVENC_0,
+ MC_LATENCY_ALLOWANCE_NVDEC_0,
+ MC_LATENCY_ALLOWANCE_VIC_0,
+ MC_LATENCY_ALLOWANCE_VI2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_1,
+ },
+ .burst_per_channel = {
+ { .bank = 0, .offset = EMC_MRW10, },
+ { .bank = 1, .offset = EMC_MRW10, },
+ { .bank = 0, .offset = EMC_MRW11, },
+ { .bank = 1, .offset = EMC_MRW11, },
+ { .bank = 0, .offset = EMC_MRW12, },
+ { .bank = 1, .offset = EMC_MRW12, },
+ { .bank = 0, .offset = EMC_MRW13, },
+ { .bank = 1, .offset = EMC_MRW13, },
+ },
+ .trim_per_channel = {
+ { .bank = 0, .offset = EMC_CMD_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_CMD_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_2, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_3, },
+ },
+ .vref_per_channel = {
+ {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ },
+ },
+};
+
+static void tegra210_emc_train(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, training);
+ unsigned long flags;
+
+ if (!emc->last)
+ return;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (emc->sequence->periodic_compensation)
+ emc->sequence->periodic_compensation(emc);
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_start(struct tegra210_emc *emc)
+{
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_stop(struct tegra210_emc *emc)
+{
+ del_timer(&emc->training);
+}
+
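+/*
+ * Read the DRAM temperature state from MR4 of each device: bits 2:0 hold
+ * the refresh-rate state (SRR), bit 7 flags that the value changed since
+ * the last read. The worst (highest) state across devices is returned.
+ */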
+static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc)
+{
+ unsigned long flags;
+ u32 value, max = 0;
+ unsigned int i;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ for (i = 0; i < emc->num_devices; i++) {
+ value = tegra210_emc_mrr_read(emc, i, 4);
+
+ if (value & BIT(7))
+ dev_dbg(emc->dev,
+ "sensor reading changed for device %u: %08x\n",
+ i, value);
+
+ value = FIELD_GET(LPDDR2_MR4_SRR, value);
+ if (value > max)
+ max = value;
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return max;
+}
+
+static void tegra210_emc_poll_refresh(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, refresh_timer);
+ unsigned int temperature;
+
+ if (!emc->debugfs.temperature)
+ temperature = tegra210_emc_get_temperature(emc);
+ else
+ temperature = emc->debugfs.temperature;
+
+ if (temperature == emc->temperature)
+ goto reset;
+
+ switch (temperature) {
+ case 0 ... 3:
+ /* temperature is fine, using regular refresh */
+ dev_dbg(emc->dev, "switching to nominal refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_NOMINAL);
+ break;
+
+ case 4:
+ dev_dbg(emc->dev, "switching to 2x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_2X);
+ break;
+
+ case 5:
+ dev_dbg(emc->dev, "switching to 4x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_4X);
+ break;
+
+ case 6 ... 7:
+ dev_dbg(emc->dev, "switching to throttle refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_THROTTLE);
+ break;
+
+ default:
+ WARN(1, "invalid DRAM temperature state %u\n", temperature);
+ return;
+ }
+
+ emc->temperature = temperature;
+
+reset:
+ if (atomic_read(&emc->refresh_poll) > 0) {
+ unsigned int interval = emc->refresh_poll_interval;
+ unsigned int timeout = msecs_to_jiffies(interval);
+
+ mod_timer(&emc->refresh_timer, jiffies + timeout);
+ }
+}
+
+static void tegra210_emc_poll_refresh_stop(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 0);
+ del_timer_sync(&emc->refresh_timer);
+}
+
+static void tegra210_emc_poll_refresh_start(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 1);
+
+ mod_timer(&emc->refresh_timer,
+ jiffies + msecs_to_jiffies(emc->refresh_poll_interval));
+}
+
+static int tegra210_emc_cd_max_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ *state = 1;
+
+ return 0;
+}
+
+static int tegra210_emc_cd_get_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ *state = atomic_read(&emc->refresh_poll);
+
+ return 0;
+}
+
+static int tegra210_emc_cd_set_state(struct thermal_cooling_device *cd,
+ unsigned long state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ if (state == atomic_read(&emc->refresh_poll))
+ return 0;
+
+ if (state)
+ tegra210_emc_poll_refresh_start(emc);
+ else
+ tegra210_emc_poll_refresh_stop(emc);
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops tegra210_emc_cd_ops = {
+ .get_max_state = tegra210_emc_cd_max_state,
+ .get_cur_state = tegra210_emc_cd_get_state,
+ .set_cur_state = tegra210_emc_cd_set_state,
+};
+
+static void tegra210_emc_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ emc->sequence->set_clock(emc, clksrc);
+
+ if (emc->next->periodic_training)
+ tegra210_emc_training_start(emc);
+ else
+ tegra210_emc_training_stop(emc);
+}
+
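+/*
+ * Mirror the EMC clock source and divisor into the EMC DLL clock register
+ * and select the matching PLLM VCO output (or the switch output for other
+ * sources), then enable or disable the DLL clock as the next timing
+ * requires.
+ */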
+static void tegra210_change_dll_src(struct tegra210_emc *emc,
+ u32 clksrc)
+{
+ u32 dll_setting = emc->next->dll_clk_src;
+ u32 emc_clk_src;
+ u32 emc_clk_div;
+
+ emc_clk_src = (clksrc & EMC_CLK_EMC_2X_CLK_SRC_MASK) >>
+ EMC_CLK_EMC_2X_CLK_SRC_SHIFT;
+ emc_clk_div = (clksrc & EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
+ EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~(DLL_CLK_EMC_DLL_CLK_SRC_MASK |
+ DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK);
+ dll_setting |= emc_clk_src << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT;
+ dll_setting |= emc_clk_div << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK;
+ if (emc_clk_src == EMC_CLK_SOURCE_PLLMB_LJ)
+ dll_setting |= (PLLM_VCOB <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else if (emc_clk_src == EMC_CLK_SOURCE_PLLM_LJ)
+ dll_setting |= (PLLM_VCOA <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else
+ dll_setting |= (EMC_DLL_SWITCH_OUT <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+
+ tegra210_clk_emc_dll_update_setting(dll_setting);
+
+ if (emc->next->clk_out_enb_x_0_clk_enb_emc_dll)
+ tegra210_clk_emc_dll_enable(true);
+ else
+ tegra210_clk_emc_dll_enable(false);
+}
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh)
+{
+ struct tegra210_emc_timing *timings;
+ unsigned long flags;
+
+ if ((emc->dram_type != DRAM_TYPE_LPDDR2 &&
+ emc->dram_type != DRAM_TYPE_LPDDR4) ||
+ !emc->last)
+ return -ENODEV;
+
+ if (refresh > TEGRA210_EMC_REFRESH_THROTTLE)
+ return -EINVAL;
+
+ if (refresh == emc->refresh)
+ return 0;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (refresh == TEGRA210_EMC_REFRESH_THROTTLE && emc->derated)
+ timings = emc->derated;
+ else
+ timings = emc->nominal;
+
+ if (timings != emc->timings) {
+ unsigned int index = emc->last - emc->timings;
+ u32 clksrc;
+
+ clksrc = emc->provider.configs[index].value |
+ EMC_CLK_FORCE_CC_TRIGGER;
+
+ emc->next = &timings[index];
+ emc->timings = timings;
+
+ tegra210_emc_set_clock(emc, clksrc);
+ } else {
+ tegra210_emc_adjust_timing(emc, emc->last);
+ tegra210_emc_timing_update(emc);
+
+ if (refresh != TEGRA210_EMC_REFRESH_NOMINAL)
+ emc_writel(emc, EMC_REF_REF_CMD, EMC_REF);
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
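+/*
+ * Issue a mode register read to the given chip and collect the result
+ * from all channels; each channel contributes 16 bits of data, with
+ * earlier channels ending up in the more significant bits.
+ */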
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address)
+{
+ u32 value, ret = 0;
+ unsigned int i;
+
+ value = (chip & EMC_MRR_DEV_SEL_MASK) << EMC_MRR_DEV_SEL_SHIFT |
+ (address & EMC_MRR_MA_MASK) << EMC_MRR_MA_SHIFT;
+ emc_writel(emc, value, EMC_MRR);
+
+ for (i = 0; i < emc->num_channels; i++)
+ WARN(tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_MRR_DIVLD, 1),
+ "Timed out waiting for MRR %u (ch=%u)\n", address, i);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ value = emc_channel_readl(emc, i, EMC_MRR);
+ value &= EMC_MRR_DATA_MASK;
+
+ ret = (ret << 16) | value;
+ }
+
+ return ret;
+}
+
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc)
+{
+ int err;
+
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+ emc_readl(emc, EMC_INTSTATUS);
+
+ tegra210_clk_emc_update_setting(clksrc);
+
+ err = tegra210_emc_wait_for_update(emc, 0, EMC_INTSTATUS,
+ EMC_INTSTATUS_CLKCHANGE_COMPLETE,
+ true);
+ if (err)
+ dev_warn(emc->dev, "clock change completion error: %d\n", err);
+}
+
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (emc->timings[i].rate * 1000UL == rate)
+ return &emc->timings[i];
+
+ return NULL;
+}
+
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state)
+{
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
+ value = emc_channel_readl(emc, channel, offset);
+ if (!!(value & bit_mask) == state)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set)
+{
+ u32 emc_dbg = emc_readl(emc, EMC_DBG);
+
+ if (set)
+ emc_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+ else
+ emc_writel(emc, emc_dbg & ~EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+}
+
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next)
+{
+ if (next->emc_emrs & 0x1)
+ return 0;
+
+ return 1;
+}
+
+void tegra210_emc_timing_update(struct tegra210_emc *emc)
+{
+ unsigned int i;
+ int err = 0;
+
+ emc_writel(emc, 0x1, EMC_TIMING_CONTROL);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ err |= tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ false);
+ }
+
+ if (err)
+ dev_warn(emc->dev, "timing update error: %d\n", err);
+}
+
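+/*
+ * The oscillator count is encoded piecewise: values below 0x40 count in
+ * units of 16 clocks, while higher ranges saturate at 2048, 4096 and
+ * 8192 clocks respectively.
+ */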
+unsigned long tegra210_emc_actual_osc_clocks(u32 in)
+{
+ if (in < 0x40)
+ return in * 16;
+ else if (in < 0x80)
+ return 2048;
+ else if (in < 0xc0)
+ return 4096;
+ else
+ return 8192;
+}
+
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 mpc_req = 0x4b;
+
+ emc_writel(emc, mpc_req, EMC_MPC);
+ mpc_req = emc_readl(emc, EMC_MPC);
+}
+
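+/*
+ * Recompute trimmer values for the registers tracked by periodic
+ * compensation: the drift between current and trained clock tree values
+ * is converted into DLL taps at the target rate and, when it exceeds the
+ * tree margin, applied to both entries of each affected byte pair.
+ */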
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset)
+{
+ u32 temp = 0, rate = next->rate / 1000;
+ s32 delta[4], delta_taps[4];
+ s32 new[] = {
+ TRIM_REG(0, 0, 0, 0),
+ TRIM_REG(0, 0, 0, 1),
+ TRIM_REG(0, 0, 1, 2),
+ TRIM_REG(0, 0, 1, 3),
+
+ TRIM_REG(1, 0, 2, 4),
+ TRIM_REG(1, 0, 2, 5),
+ TRIM_REG(1, 0, 3, 6),
+ TRIM_REG(1, 0, 3, 7),
+
+ TRIM_REG(0, 1, 0, 0),
+ TRIM_REG(0, 1, 0, 1),
+ TRIM_REG(0, 1, 1, 2),
+ TRIM_REG(0, 1, 1, 3),
+
+ TRIM_REG(1, 1, 2, 4),
+ TRIM_REG(1, 1, 2, 5),
+ TRIM_REG(1, 1, 3, 6),
+ TRIM_REG(1, 1, 3, 7)
+ };
+ unsigned int i;
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ case EMC_DATA_BRLSHFT_0:
+ delta[0] = 128 * (next->current_dram_clktree[C0D0U0] -
+ next->trained_dram_clktree[C0D0U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D0U1] -
+ next->trained_dram_clktree[C0D0U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D0U0] -
+ next->trained_dram_clktree[C1D0U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D0U1] -
+ next->trained_dram_clktree[C1D0U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[i * 2] = new[i * 2] + delta_taps[i];
+ new[i * 2 + 1] = new[i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_0) {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] % 64;
+ }
+
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ case EMC_DATA_BRLSHFT_1:
+ delta[0] = 128 * (next->current_dram_clktree[C0D1U0] -
+ next->trained_dram_clktree[C0D1U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D1U1] -
+ next->trained_dram_clktree[C0D1U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D1U0] -
+ next->trained_dram_clktree[C1D1U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D1U1] -
+ next->trained_dram_clktree[C1D1U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[8 + i * 2] = new[8 + i * 2] +
+ delta_taps[i];
+ new[8 + i * 2 + 1] = new[8 + i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_1) {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] % 64;
+ }
+
+ break;
+ }
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ temp = CALC_TEMP(0, 0, 0, 1, 0);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ temp = CALC_TEMP(0, 1, 2, 3, 2);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ temp = CALC_TEMP(0, 2, 4, 5, 4);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ temp = CALC_TEMP(0, 3, 6, 7, 6);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ temp = CALC_TEMP(1, 0, 0, 1, 8);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ temp = CALC_TEMP(1, 1, 2, 3, 10);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ temp = CALC_TEMP(1, 2, 4, 5, 12);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ temp = CALC_TEMP(1, 3, 6, 7, 14);
+ break;
+
+ case EMC_DATA_BRLSHFT_0:
+ temp = ((new[0] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[1] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[2] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[3] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[4] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[5] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[6] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[7] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ case EMC_DATA_BRLSHFT_1:
+ temp = ((new[8] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[9] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[10] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[11] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[12] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[13] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[14] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[15] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ default:
+ break;
+ }
+
+ return temp;
+}
+
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc)
+{
+ unsigned int i;
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ emc_writel(emc, 1, EMC_TIMING_CONTROL);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+ value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+ if ((value & EMC_CFG_DIG_DLL_CFG_DLL_EN) == 0)
+ break;
+ }
+ }
+
+ value = emc->next->burst_regs[EMC_DLL_CFG_0_INDEX];
+ emc_writel(emc, value, EMC_DLL_CFG_0);
+
+ value = emc_readl(emc, EMC_DLL_CFG_1);
+ value &= EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK;
+
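+ /*
+ * Select the DDLL calibration start trim based on the target rate
+ * (in kHz); higher rates use a smaller trim.
+ */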
+ if (emc->next->rate >= 400000 && emc->next->rate < 600000)
+ value |= 150;
+ else if (emc->next->rate >= 600000 && emc->next->rate < 800000)
+ value |= 100;
+ else if (emc->next->rate >= 800000 && emc->next->rate < 1000000)
+ value |= 70;
+ else if (emc->next->rate >= 1000000 && emc->next->rate < 1200000)
+ value |= 30;
+ else
+ value |= 20;
+
+ emc_writel(emc, value, EMC_DLL_CFG_1);
+
+ tegra210_change_dll_src(emc, clksrc);
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
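+ /* wait for the DLL enable to propagate to all channels */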
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+ value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+ if (value & EMC_CFG_DIG_DLL_CFG_DLL_EN)
+ break;
+ }
+ }
+
+ while (true) {
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED) == 0)
+ continue;
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_LOCK) == 0)
+ continue;
+
+ break;
+ }
+
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ return value & EMC_DIG_DLL_STATUS_DLL_OUT_MASK;
+}
+
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 cmd_pad, dq_pad, rfu1, cfg5, common_tx, ramp_up_wait = 0;
+ const struct tegra210_emc_timing *timing;
+
+ if (flip_backward)
+ timing = emc->last;
+ else
+ timing = emc->next;
+
+ cmd_pad = timing->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = timing->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = timing->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = timing->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
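+ /*
+ * Note that clk is the clock period (in ps) rather than a frequency,
+ * so 1000000 / threshold converts an MHz threshold into a period
+ * bound and (100000 / clk) + 1 is the number of cycles in roughly
+ * 100 ns. ramp_up_wait accumulates the total delay in ps.
+ */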
+ if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, common_tx & 0xa,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, common_tx & 0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx | 0x8,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ }
+
+ if (clk < 1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD) {
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & 0xfeedfeed,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + (10 * clk);
+ } else if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, rfu1 | 0x06000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + 10 * clk;
+ } else {
+ ccfifo_writel(emc, rfu1 | 0x00000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_up_wait += 12 * clk;
+ }
+
+ cmd_pad &= ~EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 5);
+
+ return ramp_up_wait;
+}
+
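+/*
+ * Mirror image of the ramp-up sequence above: disable command
+ * transmission first, then progressively power down the pad bricks,
+ * accumulating the total delay (in ps) in ramp_down_wait.
+ */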
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 ramp_down_wait = 0, cmd_pad, dq_pad, rfu1, cfg5, common_tx;
+ const struct tegra210_emc_timing *entry;
+ u32 seq_wait;
+
+ if (flip_backward)
+ entry = emc->next;
+ else
+ entry = emc->last;
+
+ cmd_pad = entry->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = entry->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = entry->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = entry->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = entry->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, cfg5 | EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_down_wait = 12 * clk;
+
+ seq_wait = (100000 / clk) + 1;
+
+ if (clk < (1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD)) {
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & ~0x01bf01bf,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0xffff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait + 19);
+ ramp_down_wait += 100000 + (20 * clk);
+ }
+
+ if (clk < (1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD)) {
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0x5,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, 0, 0, seq_wait);
+ ramp_down_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ }
+
+ return ramp_down_wait;
+}
+
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing)
+{
+ timing->current_dram_clktree[C0D0U0] =
+ timing->trained_dram_clktree[C0D0U0];
+ timing->current_dram_clktree[C0D0U1] =
+ timing->trained_dram_clktree[C0D0U1];
+ timing->current_dram_clktree[C1D0U0] =
+ timing->trained_dram_clktree[C1D0U0];
+ timing->current_dram_clktree[C1D0U1] =
+ timing->trained_dram_clktree[C1D0U1];
+ timing->current_dram_clktree[C1D1U0] =
+ timing->trained_dram_clktree[C1D1U0];
+ timing->current_dram_clktree[C1D1U1] =
+ timing->trained_dram_clktree[C1D1U1];
+}
+
+static void update_dll_control(struct tegra210_emc *emc, u32 value, bool state)
+{
+ unsigned int i;
+
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN,
+ state);
+}
+
+void tegra210_emc_dll_disable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, false);
+}
+
+void tegra210_emc_dll_enable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, true);
+}
+
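+/*
+ * Speed up the refresh, pre-refresh request count and dynamic
+ * self-refresh intervals by a factor of 2 or 4 when one of the
+ * derated refresh modes is active.
+ */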
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing)
+{
+ u32 dsr_cntrl = timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];
+ u32 pre_ref = timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
+ u32 ref = timing->burst_regs[EMC_REFRESH_INDEX];
+
+ switch (emc->refresh) {
+ case TEGRA210_EMC_REFRESH_NOMINAL:
+ case TEGRA210_EMC_REFRESH_THROTTLE:
+ break;
+
+ case TEGRA210_EMC_REFRESH_2X:
+ ref = REFRESH_SPEEDUP(ref, 2);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 2);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 2);
+ break;
+
+ case TEGRA210_EMC_REFRESH_4X:
+ ref = REFRESH_SPEEDUP(ref, 4);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 4);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 4);
+ break;
+
+ default:
+ dev_warn(emc->dev, "failed to set refresh: %d\n", emc->refresh);
+ return;
+ }
+
+ emc_writel(emc, ref, emc->offsets->burst[EMC_REFRESH_INDEX]);
+ emc_writel(emc, pre_ref,
+ emc->offsets->burst[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
+ emc_writel(emc, dsr_cntrl,
+ emc->offsets->burst[EMC_DYN_SELF_REF_CONTROL_INDEX]);
+}
+
+static int tegra210_emc_set_rate(struct device *dev,
+ const struct tegra210_clk_emc_config *config)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timing = NULL;
+ unsigned long rate = config->rate;
+ s64 last_change_delay;
+ unsigned long flags;
+ unsigned int i;
+
+ if (rate == emc->last->rate * 1000UL)
+ return 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL == rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing)
+ return -EINVAL;
+
+ if (rate > 204000000 && !timing->trained)
+ return -EINVAL;
+
+ emc->next = timing;
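+ /* enforce a minimum delay (in us) between two consecutive rate changes */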
+ last_change_delay = ktime_us_delta(ktime_get(), emc->clkchange_time);
+
+ /* XXX use non-busy-looping sleep? */
+ if ((last_change_delay >= 0) &&
+ (last_change_delay < emc->clkchange_delay))
+ udelay(emc->clkchange_delay - (int)last_change_delay);
+
+ spin_lock_irqsave(&emc->lock, flags);
+ tegra210_emc_set_clock(emc, config->value);
+ emc->clkchange_time = ktime_get();
+ emc->last = timing;
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ *
+ * - temperature: Writing a value in the range 0-7 to this file overrides
+ * the DRAM temperature state used by the driver; writing 0 reverts to
+ * polling the temperature from the DRAM devices.
+ */
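+
+/*
+ * Example usage (illustrative only; the available rates depend on the
+ * EMC tables passed in from the bootloader):
+ *
+ * # cat /sys/kernel/debug/emc/available_rates
+ * 40800000 68000000 102000000 204000000 408000000
+ * # echo 204000000 > /sys/kernel/debug/emc/min_rate
+ */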
+
+static bool tegra210_emc_validate_rate(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate * 1000UL)
+ return true;
+
+ return false;
+}
+
+static int tegra210_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra210_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%u", prefix, emc->timings[i].rate * 1000);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int tegra210_emc_debug_available_rates_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, tegra210_emc_debug_available_rates_show,
+ inode->i_private);
+}
+
+static const struct file_operations tegra210_emc_debug_available_rates_fops = {
+ .open = tegra210_emc_debug_available_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tegra210_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+ tegra210_emc_debug_min_rate_get,
+ tegra210_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+ tegra210_emc_debug_max_rate_get,
+ tegra210_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_temperature_get(void *data, u64 *temperature)
+{
+ struct tegra210_emc *emc = data;
+ unsigned int value;
+
+ if (!emc->debugfs.temperature)
+ value = tegra210_emc_get_temperature(emc);
+ else
+ value = emc->debugfs.temperature;
+
+ *temperature = value;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
+{
+ struct tegra210_emc *emc = data;
+
+ if (temperature > 7)
+ return -EINVAL;
+
+ emc->debugfs.temperature = temperature;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+ tegra210_emc_debug_temperature_get,
+ tegra210_emc_debug_temperature_set, "%llu\n");
+
+static void tegra210_emc_debugfs_init(struct tegra210_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate * 1000UL;
+
+ if (emc->timings[i].rate * 1000UL > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate * 1000UL;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ return;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ if (IS_ERR(emc->debugfs.root)) {
+ dev_err(dev, "failed to create debugfs directory\n");
+ return;
+ }
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra210_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_max_rate_fops);
+ debugfs_create_file("temperature", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_temperature_fops);
+}
+
+static void tegra210_emc_detect(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ /* probe the number of connected DRAM devices */
+ value = mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ if (value & MC_EMEM_ADR_CFG_EMEM_NUMDEV)
+ emc->num_devices = 2;
+ else
+ emc->num_devices = 1;
+
+ /* probe the type of DRAM */
+ value = emc_readl(emc, EMC_FBIO_CFG5);
+ emc->dram_type = value & 0x3;
+
+ /* probe the number of channels */
+ value = emc_readl(emc, EMC_FBIO_CFG7);
+
+ if ((value & EMC_FBIO_CFG7_CH1_ENABLE) &&
+ (value & EMC_FBIO_CFG7_CH0_ENABLE))
+ emc->num_channels = 2;
+ else
+ emc->num_channels = 1;
+}
+
+static int tegra210_emc_validate_timings(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timings,
+ unsigned int num_timings)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_timings; i++) {
+ u32 min_volt = timings[i].min_volt;
+ u32 rate = timings[i].rate;
+
+ if (!rate)
+ return -EINVAL;
+
+ if ((i > 0) && ((rate <= timings[i - 1].rate) ||
+ (min_volt < timings[i - 1].min_volt)))
+ return -EINVAL;
+
+ if (timings[i].revision != timings[0].revision)
+ dev_warn(emc->dev,
+ "inconsistent revision @%u: %u vs. %u\n",
+ i, timings[i].revision,
+ timings[0].revision);
+ }
+
+ return 0;
+}
+
+static int tegra210_emc_probe(struct platform_device *pdev)
+{
+ struct thermal_cooling_device *cd;
+ unsigned long current_rate;
+ struct platform_device *mc;
+ struct tegra210_emc *emc;
+ struct device_node *np;
+ unsigned int i;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk))
+ return PTR_ERR(emc->clk);
+
+ platform_set_drvdata(pdev, emc);
+ spin_lock_init(&emc->lock);
+ emc->dev = &pdev->dev;
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not get memory controller\n");
+ return -ENOENT;
+ }
+
+ mc = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!mc)
+ return -ENOENT;
+
+ emc->mc = platform_get_drvdata(mc);
+ if (!emc->mc) {
+ put_device(&mc->dev);
+ return -EPROBE_DEFER;
+ }
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs)) {
+ err = PTR_ERR(emc->regs);
+ goto put_mc;
+ }
+
+ for (i = 0; i < 2; i++) {
+ emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
+ if (IS_ERR(emc->channel[i])) {
+ err = PTR_ERR(emc->channel[i]);
+ goto put_mc;
+ }
+ }
+
+ tegra210_emc_detect(emc);
+ np = pdev->dev.of_node;
+
+ /* attach to the nominal and (optional) derated tables */
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
+ if (err < 0) {
+ dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
+ goto put_mc;
+ }
+
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
+ if (err < 0 && err != -ENODEV) {
+ dev_err(emc->dev, "failed to get derated EMC table: %d\n", err);
+ goto release;
+ }
+
+ /* validate the tables */
+ if (emc->nominal) {
+ err = tegra210_emc_validate_timings(emc, emc->nominal,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ if (emc->derated) {
+ err = tegra210_emc_validate_timings(emc, emc->derated,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ /* default to the nominal table */
+ emc->timings = emc->nominal;
+
+ /* pick the current timing based on the current EMC clock rate */
+ current_rate = clk_get_rate(emc->clk) / 1000;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate == current_rate) {
+ emc->last = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (i == emc->num_timings) {
+ dev_err(emc->dev, "no EMC table entry found for %lu kHz\n",
+ current_rate);
+ err = -ENOENT;
+ goto release;
+ }
+
+ /* pick a compatible clock change sequence for the EMC table */
+ for (i = 0; i < ARRAY_SIZE(tegra210_emc_sequences); i++) {
+ const struct tegra210_emc_sequence *sequence =
+ tegra210_emc_sequences[i];
+
+ if (emc->timings[0].revision == sequence->revision) {
+ emc->sequence = sequence;
+ break;
+ }
+ }
+
+ if (!emc->sequence) {
+ dev_err(&pdev->dev, "sequence %u not supported\n",
+ emc->timings[0].revision);
+ err = -ENOTSUPP;
+ goto release;
+ }
+
+ emc->offsets = &tegra210_emc_table_register_offsets;
+ emc->refresh = TEGRA210_EMC_REFRESH_NOMINAL;
+
+ emc->provider.owner = THIS_MODULE;
+ emc->provider.dev = &pdev->dev;
+ emc->provider.set_rate = tegra210_emc_set_rate;
+
+ emc->provider.configs = devm_kcalloc(&pdev->dev, emc->num_timings,
+ sizeof(*emc->provider.configs),
+ GFP_KERNEL);
+ if (!emc->provider.configs) {
+ err = -ENOMEM;
+ goto release;
+ }
+
+ emc->provider.num_configs = emc->num_timings;
+
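+ /*
+ * same_freq mirrors the EMC_SAME_FREQ bit in MC_EMEM_ARB_MISC0,
+ * which indicates whether the memory controller runs at the same
+ * frequency as the EMC for this timing.
+ */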
+ for (i = 0; i < emc->provider.num_configs; i++) {
+ struct tegra210_emc_timing *timing = &emc->timings[i];
+ struct tegra210_clk_emc_config *config =
+ &emc->provider.configs[i];
+ u32 value;
+
+ config->rate = timing->rate * 1000UL;
+ config->value = timing->clk_src_emc;
+
+ value = timing->burst_mc_regs[MC_EMEM_ARB_MISC0_INDEX];
+
+ if ((value & MC_EMEM_ARB_MISC0_EMC_SAME_FREQ) == 0)
+ config->same_freq = false;
+ else
+ config->same_freq = true;
+ }
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to attach to EMC clock: %d\n", err);
+ goto release;
+ }
+
+ emc->clkchange_delay = 100;
+ emc->training_interval = 100;
+ dev_set_drvdata(emc->dev, emc);
+
+ timer_setup(&emc->refresh_timer, tegra210_emc_poll_refresh,
+ TIMER_DEFERRABLE);
+ atomic_set(&emc->refresh_poll, 0);
+ emc->refresh_poll_interval = 1000;
+
+ timer_setup(&emc->training, tegra210_emc_train, 0);
+
+ tegra210_emc_debugfs_init(emc);
+
+ cd = devm_thermal_of_cooling_device_register(emc->dev, np, "emc", emc,
+ &tegra210_emc_cd_ops);
+ if (IS_ERR(cd)) {
+ err = PTR_ERR(cd);
+ dev_err(emc->dev, "failed to register cooling device: %d\n",
+ err);
+ goto detach;
+ }
+
+ return 0;
+
+detach:
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+release:
+ of_reserved_mem_device_release(emc->dev);
+put_mc:
+ put_device(emc->mc->dev);
+ return err;
+}
+
+static int tegra210_emc_remove(struct platform_device *pdev)
+{
+ struct tegra210_emc *emc = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+ of_reserved_mem_device_release(emc->dev);
+ put_device(emc->mc->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_suspend(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_rate_exclusive_get(emc->clk);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to acquire clock: %d\n", err);
+ return err;
+ }
+
+ emc->resume_rate = clk_get_rate(emc->clk);
+
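+ /* fall back to a safe low rate (204 MHz) while suspended */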
+ clk_set_rate(emc->clk, 204000000);
+ tegra210_clk_emc_detach(emc->clk);
+
+ dev_dbg(dev, "suspending at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_resume(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(dev, "failed to attach to EMC clock: %d\n", err);
+ return err;
+ }
+
+ clk_set_rate(emc->clk, emc->resume_rate);
+ clk_rate_exclusive_put(emc->clk);
+
+ dev_dbg(dev, "resuming at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra210_emc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra210_emc_suspend, tegra210_emc_resume)
+};
+
+static const struct of_device_id tegra210_emc_of_match[] = {
+ { .compatible = "nvidia,tegra210-emc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra210_emc_of_match);
+
+static struct platform_driver tegra210_emc_driver = {
+ .driver = {
+ .name = "tegra210-emc",
+ .of_match_table = tegra210_emc_of_match,
+ .pm = &tegra210_emc_pm_ops,
+ },
+ .probe = tegra210_emc_probe,
+ .remove = tegra210_emc_remove,
+};
+
+module_platform_driver(tegra210_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_AUTHOR("Joseph Lo <josephl@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra210 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra210-emc-table.c b/drivers/memory/tegra/tegra210-emc-table.c
new file mode 100644
index 000000000000..3e0598363b87
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-table.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of_reserved_mem.h>
+
+#include "tegra210-emc.h"
+
+#define TEGRA_EMC_MAX_FREQS 16
+
+static int tegra210_emc_table_device_init(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timings;
+ unsigned int i, count = 0;
+
+ timings = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (!timings) {
+ dev_err(dev, "failed to map EMC table\n");
+ return -ENOMEM;
+ }
+
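+ /* count the valid entries; the table is terminated by a zero revision */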
+ for (i = 0; i < TEGRA_EMC_MAX_FREQS; i++) {
+ if (timings[i].revision == 0)
+ break;
+
+ count++;
+ }
+
+ /* only the nominal and derated tables are expected */
+ if (emc->derated) {
+ dev_warn(dev, "excess EMC table '%s'\n", rmem->name);
+ goto out;
+ }
+
+ if (emc->nominal) {
+ if (count != emc->num_timings) {
+ dev_warn(dev, "%u derated vs. %u nominal entries\n",
+ count, emc->num_timings);
+ memunmap(timings);
+ return -EINVAL;
+ }
+
+ emc->derated = timings;
+ } else {
+ emc->num_timings = count;
+ emc->nominal = timings;
+ }
+
+out:
+ /* keep track of which table this is */
+ rmem->priv = timings;
+
+ return 0;
+}
+
+static void tegra210_emc_table_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc_timing *timings = rmem->priv;
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+
+ if ((emc->nominal && timings != emc->nominal) &&
+ (emc->derated && timings != emc->derated))
+ dev_warn(dev, "trying to release unassigned EMC table '%s'\n",
+ rmem->name);
+
+ memunmap(timings);
+}
+
+static const struct reserved_mem_ops tegra210_emc_table_ops = {
+ .device_init = tegra210_emc_table_device_init,
+ .device_release = tegra210_emc_table_device_release,
+};
+
+static int tegra210_emc_table_init(struct reserved_mem *rmem)
+{
+ pr_debug("Tegra210 EMC table at %pa, size %lu bytes\n", &rmem->base,
+ (unsigned long)rmem->size);
+
+ rmem->ops = &tegra210_emc_table_ops;
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(tegra210_emc_table, "nvidia,tegra210-emc-table",
+ tegra210_emc_table_init);
diff --git a/drivers/memory/tegra/tegra210-emc.h b/drivers/memory/tegra/tegra210-emc.h
new file mode 100644
index 000000000000..8988bcf15290
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc.h
@@ -0,0 +1,1016 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_EMC_H
+#define TEGRA210_EMC_H
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#define DVFS_FGCG_HIGH_SPEED_THRESHOLD 1000
+#define IOBRICK_DCC_THRESHOLD 2400
+#define DVFS_FGCG_MID_SPEED_THRESHOLD 600
+
+#define EMC_STATUS_UPDATE_TIMEOUT 1000
+
+/* register definitions */
+#define EMC_INTSTATUS 0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4)
+#define EMC_DBG 0x8
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_WRITE_ACTIVE_ONLY BIT(30)
+#define EMC_CFG 0xc
+#define EMC_CFG_DRAM_CLKSTOP_PD BIT(31)
+#define EMC_CFG_DRAM_CLKSTOP_SR BIT(30)
+#define EMC_CFG_DRAM_ACPD BIT(29)
+#define EMC_CFG_DYN_SELF_REF BIT(28)
+#define EMC_PIN 0x24
+#define EMC_PIN_PIN_CKE BIT(0)
+#define EMC_PIN_PIN_CKEB BIT(1)
+#define EMC_PIN_PIN_CKE_PER_DEV BIT(2)
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_RC 0x2c
+#define EMC_RFC 0x30
+#define EMC_RAS 0x34
+#define EMC_RP 0x38
+#define EMC_R2W 0x3c
+#define EMC_W2R 0x40
+#define EMC_R2P 0x44
+#define EMC_W2P 0x48
+#define EMC_RD_RCD 0x4c
+#define EMC_WR_RCD 0x50
+#define EMC_RRD 0x54
+#define EMC_REXT 0x58
+#define EMC_WDV 0x5c
+#define EMC_QUSE 0x60
+#define EMC_QRST 0x64
+#define EMC_QSAFE 0x68
+#define EMC_RDV 0x6c
+#define EMC_REFRESH 0x70
+#define EMC_BURST_REFRESH_NUM 0x74
+#define EMC_PDEX2WR 0x78
+#define EMC_PDEX2RD 0x7c
+#define EMC_PCHG2PDEN 0x80
+#define EMC_ACT2PDEN 0x84
+#define EMC_AR2PDEN 0x88
+#define EMC_RW2PDEN 0x8c
+#define EMC_TXSR 0x90
+#define EMC_TCKE 0x94
+#define EMC_TFAW 0x98
+#define EMC_TRPAB 0x9c
+#define EMC_TCLKSTABLE 0xa0
+#define EMC_TCLKSTOP 0xa4
+#define EMC_TREFBW 0xa8
+#define EMC_TPPD 0xac
+#define EMC_ODT_WRITE 0xb0
+#define EMC_PDEX2MRR 0xb4
+#define EMC_WEXT 0xb8
+#define EMC_RFC_SLR 0xc0
+#define EMC_MRS_WAIT_CNT2 0xc4
+#define EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT 16
+#define EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT 0
+#define EMC_MRS_WAIT_CNT 0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+
+#define EMC_MRS 0xcc
+#define EMC_EMRS 0xd0
+#define EMC_EMRS_USE_EMRS_LONG_CNT BIT(26)
+#define EMC_REF 0xd4
+#define EMC_REF_REF_CMD BIT(0)
+#define EMC_SELF_REF 0xe0
+#define EMC_MRW 0xe8
+#define EMC_MRW_MRW_OP_SHIFT 0
+#define EMC_MRW_MRW_OP_MASK \
+ (0xff << EMC_MRW_MRW_OP_SHIFT)
+#define EMC_MRW_MRW_MA_SHIFT 16
+#define EMC_MRW_USE_MRW_EXT_CNT 27
+#define EMC_MRW_MRW_DEV_SELECTN_SHIFT 30
+
+#define EMC_MRR 0xec
+#define EMC_MRR_DEV_SEL_SHIFT 30
+#define EMC_MRR_DEV_SEL_MASK 0x3
+#define EMC_MRR_MA_SHIFT 16
+#define EMC_MRR_MA_MASK 0xff
+#define EMC_MRR_DATA_SHIFT 0
+#define EMC_MRR_DATA_MASK 0xffff
+
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK \
+ (0x3 << EMC_FBIO_CFG5_DRAM_TYPE_SHIFT)
+#define EMC_FBIO_CFG5_CMD_TX_DIS BIT(8)
+
+#define EMC_PDEX2CKE 0x118
+#define EMC_CKE2PDEN 0x11c
+#define EMC_MPC 0x128
+#define EMC_EMRS2 0x12c
+#define EMC_EMRS2_USE_EMRS2_LONG_CNT BIT(26)
+#define EMC_MRW2 0x134
+#define EMC_MRW3 0x138
+#define EMC_MRW4 0x13c
+#define EMC_R2R 0x144
+#define EMC_EINPUT 0x14c
+#define EMC_EINPUT_DURATION 0x150
+#define EMC_PUTERM_EXTRA 0x154
+#define EMC_TCKESR 0x158
+#define EMC_TPD 0x15c
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START BIT(0)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL BIT(9)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL BIT(10)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE BIT(29)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START BIT(31)
+#define EMC_EMC_STATUS 0x2b4
+#define EMC_EMC_STATUS_MRR_DIVLD BIT(20)
+#define EMC_EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT 4
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT)
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT 8
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT)
+
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_CFG_DLL_EN BIT(0)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK BIT(1)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC BIT(3)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK BIT(4)
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT 6
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK \
+ (0x3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT)
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT 8
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK \
+ (0x7 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_DIG_DLL_STATUS 0x2c4
+#define EMC_DIG_DLL_STATUS_DLL_LOCK BIT(15)
+#define EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED BIT(17)
+#define EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT 0
+#define EMC_DIG_DLL_STATUS_DLL_OUT_MASK \
+ (0x7ff << EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_1 0x2c8
+#define EMC_RDV_MASK 0x2cc
+#define EMC_WDV_MASK 0x2d0
+#define EMC_RDV_EARLY_MASK 0x2d4
+#define EMC_RDV_EARLY 0x2d8
+#define EMC_AUTO_CAL_CONFIG8 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK 0x7ff
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT 0
+
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_ZQ_CAL_DEV_SEL_SHIFT 30
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_ZQ_LATCH_CMD BIT(1)
+#define EMC_ZQ_CAL_ZQ_CAL_CMD BIT(0)
+#define EMC_FDPD_CTRL_DQ 0x310
+#define EMC_FDPD_CTRL_CMD 0x314
+#define EMC_PMACRO_CMD_BRICK_CTRL_FDPD 0x318
+#define EMC_PMACRO_DATA_BRICK_CTRL_FDPD 0x31c
+#define EMC_PMACRO_BRICK_CTRL_RFU1 0x330
+#define EMC_PMACRO_BRICK_CTRL_RFU2 0x334
+#define EMC_TR_TIMING_0 0x3b4
+#define EMC_TR_CTRL_1 0x3bc
+#define EMC_TR_RDV 0x3c4
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN BIT(8)
+#define EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN BIT(5)
+#define EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN BIT(4)
+#define EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN BIT(3)
+#define EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN BIT(2)
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+#define EMC_CCFIFO_ADDR 0x3e8
+#define EMC_CCFIFO_ADDR_STALL_BY_1 (1 << 31)
+#define EMC_CCFIFO_ADDR_STALL(x) (((x) & 0x7fff) << 16)
+#define EMC_CCFIFO_ADDR_OFFSET(x) ((x) & 0xffff)
+#define EMC_CCFIFO_DATA 0x3ec
+#define EMC_TR_QPOP 0x3f4
+#define EMC_TR_RDV_MASK 0x3f8
+#define EMC_TR_QSAFE 0x3fc
+#define EMC_TR_QRST 0x400
+#define EMC_ISSUE_QRST 0x428
+#define EMC_AUTO_CAL_CONFIG2 0x458
+#define EMC_AUTO_CAL_CONFIG3 0x45c
+#define EMC_TR_DVFS 0x460
+#define EMC_AUTO_CAL_CHANNEL 0x464
+#define EMC_IBDLY 0x468
+#define EMC_OBDLY 0x46c
+#define EMC_TXDSRVTTGEN 0x480
+#define EMC_WE_DURATION 0x48c
+#define EMC_WS_DURATION 0x490
+#define EMC_WEV 0x494
+#define EMC_WSV 0x498
+#define EMC_CFG_3 0x49c
+#define EMC_MRW6 0x4a4
+#define EMC_MRW7 0x4a8
+#define EMC_MRW8 0x4ac
+#define EMC_MRW9 0x4b0
+#define EMC_MRW10 0x4b4
+#define EMC_MRW11 0x4b8
+#define EMC_MRW12 0x4bc
+#define EMC_MRW13 0x4c0
+#define EMC_MRW14 0x4c4
+#define EMC_MRW15 0x4d0
+#define EMC_CFG_SYNC 0x4d4
+#define EMC_FDPD_CTRL_CMD_NO_RAMP 0x4d8
+#define EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE BIT(0)
+#define EMC_WDV_CHK 0x4e0
+#define EMC_CFG_PIPE_2 0x554
+#define EMC_CFG_PIPE_CLK 0x558
+#define EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON BIT(0)
+#define EMC_CFG_PIPE_1 0x55c
+#define EMC_CFG_PIPE 0x560
+#define EMC_QPOP 0x564
+#define EMC_QUSE_WIDTH 0x568
+#define EMC_PUTERM_WIDTH 0x56c
+#define EMC_AUTO_CAL_CONFIG7 0x574
+#define EMC_REFCTRL2 0x580
+#define EMC_FBIO_CFG7 0x584
+#define EMC_FBIO_CFG7_CH0_ENABLE BIT(1)
+#define EMC_FBIO_CFG7_CH1_ENABLE BIT(2)
+#define EMC_DATA_BRLSHFT_0 0x588
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_DATA_BRLSHFT_1 0x58c
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_RFCPB 0x590
+#define EMC_DQS_BRLSHFT_0 0x594
+#define EMC_DQS_BRLSHFT_1 0x598
+#define EMC_CMD_BRLSHFT_0 0x59c
+#define EMC_CMD_BRLSHFT_1 0x5a0
+#define EMC_CMD_BRLSHFT_2 0x5a4
+#define EMC_CMD_BRLSHFT_3 0x5a8
+#define EMC_QUSE_BRLSHFT_0 0x5ac
+#define EMC_AUTO_CAL_CONFIG4 0x5b0
+#define EMC_AUTO_CAL_CONFIG5 0x5b4
+#define EMC_QUSE_BRLSHFT_1 0x5b8
+#define EMC_QUSE_BRLSHFT_2 0x5bc
+#define EMC_CCDMW 0x5c0
+#define EMC_QUSE_BRLSHFT_3 0x5c4
+#define EMC_AUTO_CAL_CONFIG6 0x5cc
+#define EMC_DLL_CFG_0 0x5e4
+#define EMC_DLL_CFG_1 0x5e8
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT 10
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK \
+ (0x7ff << EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT)
+
+#define EMC_CONFIG_SAMPLE_DELAY 0x5f0
+#define EMC_CFG_UPDATE 0x5f4
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT 9
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK \
+ (0x3 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT)
+
+#define EMC_PMACRO_QUSE_DDLL_RANK0_0 0x600
+#define EMC_PMACRO_QUSE_DDLL_RANK0_1 0x604
+#define EMC_PMACRO_QUSE_DDLL_RANK0_2 0x608
+#define EMC_PMACRO_QUSE_DDLL_RANK0_3 0x60c
+#define EMC_PMACRO_QUSE_DDLL_RANK0_4 0x610
+#define EMC_PMACRO_QUSE_DDLL_RANK0_5 0x614
+#define EMC_PMACRO_QUSE_DDLL_RANK1_0 0x620
+#define EMC_PMACRO_QUSE_DDLL_RANK1_1 0x624
+#define EMC_PMACRO_QUSE_DDLL_RANK1_2 0x628
+#define EMC_PMACRO_QUSE_DDLL_RANK1_3 0x62c
+#define EMC_PMACRO_QUSE_DDLL_RANK1_4 0x630
+#define EMC_PMACRO_QUSE_DDLL_RANK1_5 0x634
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 0x640
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 0x644
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 0x648
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 0x64c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4 0x650
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5 0x654
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 0x660
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 0x664
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 0x668
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 0x66c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4 0x670
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5 0x674
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0 0x680
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1 0x684
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2 0x688
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3 0x68c
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4 0x690
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5 0x694
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0 0x6a0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1 0x6a4
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2 0x6a8
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3 0x6ac
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4 0x6b0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5 0x6b4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0 0x6c0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1 0x6c4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2 0x6c8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3 0x6cc
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0 0x6e0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1 0x6e4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2 0x6e8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3 0x6ec
+#define EMC_PMACRO_TX_PWRD_0 0x720
+#define EMC_PMACRO_TX_PWRD_1 0x724
+#define EMC_PMACRO_TX_PWRD_2 0x728
+#define EMC_PMACRO_TX_PWRD_3 0x72c
+#define EMC_PMACRO_TX_PWRD_4 0x730
+#define EMC_PMACRO_TX_PWRD_5 0x734
+#define EMC_PMACRO_TX_SEL_CLK_SRC_0 0x740
+#define EMC_PMACRO_TX_SEL_CLK_SRC_1 0x744
+#define EMC_PMACRO_TX_SEL_CLK_SRC_3 0x74c
+#define EMC_PMACRO_TX_SEL_CLK_SRC_2 0x748
+#define EMC_PMACRO_TX_SEL_CLK_SRC_4 0x750
+#define EMC_PMACRO_TX_SEL_CLK_SRC_5 0x754
+#define EMC_PMACRO_DDLL_BYPASS 0x760
+#define EMC_PMACRO_DDLL_PWRD_0 0x770
+#define EMC_PMACRO_DDLL_PWRD_1 0x774
+#define EMC_PMACRO_DDLL_PWRD_2 0x778
+#define EMC_PMACRO_CMD_CTRL_0 0x780
+#define EMC_PMACRO_CMD_CTRL_1 0x784
+#define EMC_PMACRO_CMD_CTRL_2 0x788
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0x800
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0x804
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0x808
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3 0x80c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0x810
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0x814
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0x818
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3 0x81c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0x820
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0x824
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0x828
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3 0x82c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0x830
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0x834
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0x838
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3 0x83c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0x840
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0x844
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0x848
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3 0x84c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0x850
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0x854
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0x858
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3 0x85c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0x860
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0x864
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0x868
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3 0x86c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0x870
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0x874
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0x878
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3 0x87c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0 0x880
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1 0x884
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2 0x888
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3 0x88c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0 0x890
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1 0x894
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2 0x898
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3 0x89c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0 0x8a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1 0x8a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2 0x8a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3 0x8ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0 0x8b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1 0x8b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2 0x8b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3 0x8bc
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0x900
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0x904
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0x908
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3 0x90c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0x910
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0x914
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0x918
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3 0x91c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0x920
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0x924
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0x928
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3 0x92c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0x930
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0x934
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0x938
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3 0x93c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0x940
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0x944
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0x948
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3 0x94c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0x950
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0x954
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0x958
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3 0x95c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0x960
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0x964
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0x968
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3 0x96c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0x970
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0x974
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0x978
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3 0x97c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0 0x980
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1 0x984
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2 0x988
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3 0x98c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0 0x990
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1 0x994
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2 0x998
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3 0x99c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0 0x9a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1 0x9a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2 0x9a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3 0x9ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0 0x9b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1 0x9b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2 0x9b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3 0x9bc
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0xa00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0xa04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0xa08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0xa10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0xa14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0xa18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0xa20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0xa24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0xa28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0xa30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0xa34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0xa38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0xa40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0xa44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0xa48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0xa50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0xa54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0xa58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0xa60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0xa64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0xa68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0xa70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0xa74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0xa78
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0xb00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0xb04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0xb08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0xb10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0xb14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0xb18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0xb20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0xb24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0xb28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0xb30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0xb34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0xb38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0xb40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0xb44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0xb48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0xb50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0xb54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0xb58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0xb60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0xb64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0xb68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0xb70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0xb74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0xb78
+#define EMC_PMACRO_IB_VREF_DQ_0 0xbe0
+#define EMC_PMACRO_IB_VREF_DQ_1 0xbe4
+#define EMC_PMACRO_IB_VREF_DQS_0 0xbf0
+#define EMC_PMACRO_IB_VREF_DQS_1 0xbf4
+#define EMC_PMACRO_DDLL_LONG_CMD_0 0xc00
+#define EMC_PMACRO_DDLL_LONG_CMD_1 0xc04
+#define EMC_PMACRO_DDLL_LONG_CMD_2 0xc08
+#define EMC_PMACRO_DDLL_LONG_CMD_3 0xc0c
+#define EMC_PMACRO_DDLL_LONG_CMD_4 0xc10
+#define EMC_PMACRO_DDLL_LONG_CMD_5 0xc14
+#define EMC_PMACRO_DDLL_SHORT_CMD_0 0xc20
+#define EMC_PMACRO_DDLL_SHORT_CMD_1 0xc24
+#define EMC_PMACRO_DDLL_SHORT_CMD_2 0xc28
+#define EMC_PMACRO_CFG_PM_GLOBAL_0 0xc30
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 BIT(16)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 BIT(17)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 BIT(18)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 BIT(19)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 BIT(20)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 BIT(21)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 BIT(22)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7 BIT(23)
+#define EMC_PMACRO_VTTGEN_CTRL_0 0xc34
+#define EMC_PMACRO_VTTGEN_CTRL_1 0xc38
+#define EMC_PMACRO_BG_BIAS_CTRL_0 0xc3c
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD BIT(0)
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD BIT(2)
+#define EMC_PMACRO_PAD_CFG_CTRL 0xc40
+#define EMC_PMACRO_ZCTRL 0xc44
+#define EMC_PMACRO_CMD_PAD_RX_CTRL 0xc50
+#define EMC_PMACRO_DATA_PAD_RX_CTRL 0xc54
+#define EMC_PMACRO_CMD_RX_TERM_MODE 0xc58
+#define EMC_PMACRO_DATA_RX_TERM_MODE 0xc5c
+#define EMC_PMACRO_CMD_PAD_TX_CTRL 0xc60
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC BIT(24)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON BIT(26)
+
+#define EMC_PMACRO_DATA_PAD_TX_CTRL 0xc64
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF BIT(0)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF BIT(8)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC BIT(24)
+
+#define EMC_PMACRO_COMMON_PAD_TX_CTRL 0xc68
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON 0xc78
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS BIT(16)
+#define EMC_PMACRO_VTTGEN_CTRL_2 0xcf0
+#define EMC_PMACRO_IB_RXRT 0xcf4
+#define EMC_PMACRO_TRAINING_CTRL_0 0xcf8
+#define EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR BIT(3)
+#define EMC_PMACRO_TRAINING_CTRL_1 0xcfc
+#define EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR BIT(3)
+#define EMC_TRAINING_CTRL 0xe04
+#define EMC_TRAINING_QUSE_CORS_CTRL 0xe0c
+#define EMC_TRAINING_QUSE_FINE_CTRL 0xe10
+#define EMC_TRAINING_QUSE_CTRL_MISC 0xe14
+#define EMC_TRAINING_WRITE_FINE_CTRL 0xe18
+#define EMC_TRAINING_WRITE_CTRL_MISC 0xe1c
+#define EMC_TRAINING_WRITE_VREF_CTRL 0xe20
+#define EMC_TRAINING_READ_FINE_CTRL 0xe24
+#define EMC_TRAINING_READ_CTRL_MISC 0xe28
+#define EMC_TRAINING_READ_VREF_CTRL 0xe2c
+#define EMC_TRAINING_CA_FINE_CTRL 0xe30
+#define EMC_TRAINING_CA_CTRL_MISC 0xe34
+#define EMC_TRAINING_CA_CTRL_MISC1 0xe38
+#define EMC_TRAINING_CA_VREF_CTRL 0xe3c
+#define EMC_TRAINING_SETTLE 0xe44
+#define EMC_TRAINING_MPC 0xe5c
+#define EMC_TRAINING_VREF_SETTLE 0xe6c
+#define EMC_TRAINING_QUSE_VREF_CTRL 0xed0
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK0 0xed4
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK1 0xed8
+
+#define EMC_COPY_TABLE_PARAM_PERIODIC_FIELDS BIT(0)
+#define EMC_COPY_TABLE_PARAM_TRIM_REGS BIT(1)
+
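+/*
+ * Indices of registers of interest within the burst/trim/burst-MC
+ * register arrays (see struct tegra210_emc_timing below).
+ */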
+enum burst_regs_list {
+ EMC_RP_INDEX = 6,
+ EMC_R2P_INDEX = 9,
+ EMC_W2P_INDEX,
+ EMC_MRW6_INDEX = 31,
+ EMC_REFRESH_INDEX = 41,
+ EMC_PRE_REFRESH_REQ_CNT_INDEX = 43,
+ EMC_TRPAB_INDEX = 59,
+ EMC_MRW7_INDEX = 62,
+ EMC_FBIO_CFG5_INDEX = 65,
+ EMC_FBIO_CFG7_INDEX,
+ EMC_CFG_DIG_DLL_INDEX,
+ EMC_ZCAL_INTERVAL_INDEX = 139,
+ EMC_ZCAL_WAIT_CNT_INDEX,
+ EMC_MRS_WAIT_CNT_INDEX = 141,
+ EMC_DLL_CFG_0_INDEX = 144,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX = 146,
+ EMC_CFG_INDEX = 148,
+ EMC_DYN_SELF_REF_CONTROL_INDEX = 150,
+ EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX = 161,
+ EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_BRICK_CTRL_RFU1_INDEX = 167,
+ EMC_PMACRO_BG_BIAS_CTRL_0_INDEX = 171,
+ EMC_MRW14_INDEX = 199,
+ EMC_MRW15_INDEX = 220,
+};
+
+enum trim_regs_list {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_INDEX = 60,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_INDEX,
+};
+
+enum burst_mc_regs_list {
+ MC_EMEM_ARB_MISC0_INDEX = 20,
+};
+
+enum {
+ T_RP,
+ T_FC_LPDDR4,
+ T_RFC,
+ T_PDEX,
+ RL,
+};
+
+enum {
+ AUTO_PD = 0,
+ MAN_SR = 2,
+};
+
+enum {
+ ASSEMBLY = 0,
+ ACTIVE,
+};
+
+enum {
+ C0D0U0,
+ C0D0U1,
+ C0D1U0,
+ C0D1U1,
+ C1D0U0,
+ C1D0U1,
+ C1D1U0,
+ C1D1U1,
+ DRAM_CLKTREE_NUM,
+};
+
+#define VREF_REGS_PER_CHANNEL_SIZE 4
+#define DRAM_TIMINGS_NUM 5
+#define BURST_REGS_PER_CHANNEL_SIZE 8
+#define TRIM_REGS_PER_CHANNEL_SIZE 10
+#define PTFV_ARRAY_SIZE 12
+#define SAVE_RESTORE_MOD_REGS_SIZE 12
+#define TRAINING_MOD_REGS_SIZE 20
+#define BURST_UP_DOWN_REGS_SIZE 24
+#define BURST_MC_REGS_SIZE 33
+#define TRIM_REGS_SIZE 138
+#define BURST_REGS_SIZE 221
+
+struct tegra210_emc_per_channel_regs {
+ u16 bank;
+ u16 offset;
+};
+
+struct tegra210_emc_table_register_offsets {
+ u16 burst[BURST_REGS_SIZE];
+ u16 trim[TRIM_REGS_SIZE];
+ u16 burst_mc[BURST_MC_REGS_SIZE];
+ u16 la_scale[BURST_UP_DOWN_REGS_SIZE];
+ struct tegra210_emc_per_channel_regs burst_per_channel[BURST_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs trim_per_channel[TRIM_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs vref_per_channel[VREF_REGS_PER_CHANNEL_SIZE];
+};
+
+struct tegra210_emc_timing {
+ u32 revision;
+ const char dvfs_ver[60];
+ u32 rate;
+ u32 min_volt;
+ u32 gpu_min_volt;
+ const char clock_src[32];
+ u32 clk_src_emc;
+ u32 needs_training;
+ u32 training_pattern;
+ u32 trained;
+
+ u32 periodic_training;
+ u32 trained_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 current_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 run_clocks;
+ u32 tree_margin;
+
+ u32 num_burst;
+ u32 num_burst_per_ch;
+ u32 num_trim;
+ u32 num_trim_per_ch;
+ u32 num_mc_regs;
+ u32 num_up_down;
+ u32 vref_num;
+ u32 training_mod_num;
+ u32 dram_timing_num;
+
+ u32 ptfv_list[PTFV_ARRAY_SIZE];
+
+ u32 burst_regs[BURST_REGS_SIZE];
+ u32 burst_reg_per_ch[BURST_REGS_PER_CHANNEL_SIZE];
+ u32 shadow_regs_ca_train[BURST_REGS_SIZE];
+ u32 shadow_regs_quse_train[BURST_REGS_SIZE];
+ u32 shadow_regs_rdwr_train[BURST_REGS_SIZE];
+
+ u32 trim_regs[TRIM_REGS_SIZE];
+ u32 trim_perch_regs[TRIM_REGS_PER_CHANNEL_SIZE];
+
+ u32 vref_perch_regs[VREF_REGS_PER_CHANNEL_SIZE];
+
+ u32 dram_timings[DRAM_TIMINGS_NUM];
+ u32 training_mod_regs[TRAINING_MOD_REGS_SIZE];
+ u32 save_restore_mod_regs[SAVE_RESTORE_MOD_REGS_SIZE];
+ u32 burst_mc_regs[BURST_MC_REGS_SIZE];
+ u32 la_scale_regs[BURST_UP_DOWN_REGS_SIZE];
+
+ u32 min_mrs_wait;
+ u32 emc_mrw;
+ u32 emc_mrw2;
+ u32 emc_mrw3;
+ u32 emc_mrw4;
+ u32 emc_mrw9;
+ u32 emc_mrs;
+ u32 emc_emrs;
+ u32 emc_emrs2;
+ u32 emc_auto_cal_config;
+ u32 emc_auto_cal_config2;
+ u32 emc_auto_cal_config3;
+ u32 emc_auto_cal_config4;
+ u32 emc_auto_cal_config5;
+ u32 emc_auto_cal_config6;
+ u32 emc_auto_cal_config7;
+ u32 emc_auto_cal_config8;
+ u32 emc_cfg_2;
+ u32 emc_sel_dpd_ctrl;
+ u32 emc_fdpd_ctrl_cmd_no_ramp;
+ u32 dll_clk_src;
+ u32 clk_out_enb_x_0_clk_enb_emc_dll;
+ u32 latency;
+};
+
+enum tegra210_emc_refresh {
+ TEGRA210_EMC_REFRESH_NOMINAL = 0,
+ TEGRA210_EMC_REFRESH_2X,
+ TEGRA210_EMC_REFRESH_4X,
+ TEGRA210_EMC_REFRESH_THROTTLE, /* 4x Refresh + derating. */
+};
+
+#define DRAM_TYPE_DDR3 0
+#define DRAM_TYPE_LPDDR4 1
+#define DRAM_TYPE_LPDDR2 2
+#define DRAM_TYPE_DDR2 3
+
+struct tegra210_emc {
+ struct tegra_mc *mc;
+ struct device *dev;
+ struct clk *clk;
+
+ /* nominal EMC frequency table */
+ struct tegra210_emc_timing *nominal;
+ /* derated EMC frequency table */
+ struct tegra210_emc_timing *derated;
+
+ /* currently selected table (nominal or derated) */
+ struct tegra210_emc_timing *timings;
+ unsigned int num_timings;
+
+ const struct tegra210_emc_table_register_offsets *offsets;
+
+ const struct tegra210_emc_sequence *sequence;
+ spinlock_t lock;
+
+ void __iomem *regs, *channel[2];
+ unsigned int num_channels;
+ unsigned int num_devices;
+ unsigned int dram_type;
+
+ struct tegra210_emc_timing *last;
+ struct tegra210_emc_timing *next;
+
+ unsigned int training_interval;
+ struct timer_list training;
+
+ enum tegra210_emc_refresh refresh;
+ unsigned int refresh_poll_interval;
+ struct timer_list refresh_timer;
+ unsigned int temperature;
+ atomic_t refresh_poll;
+
+ ktime_t clkchange_time;
+ int clkchange_delay;
+
+ unsigned long resume_rate;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ unsigned int temperature;
+ } debugfs;
+
+ struct tegra210_clk_emc_provider provider;
+};
+
+struct tegra210_emc_sequence {
+ u8 revision;
+ void (*set_clock)(struct tegra210_emc *emc, u32 clksrc);
+ u32 (*periodic_compensation)(struct tegra210_emc *emc);
+};
+
+static inline void emc_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset)
+{
+ writel_relaxed(value, emc->regs + offset);
+}
+
+static inline u32 emc_readl(struct tegra210_emc *emc, unsigned int offset)
+{
+ return readl_relaxed(emc->regs + offset);
+}
+
+static inline void emc_channel_writel(struct tegra210_emc *emc,
+ unsigned int channel,
+ u32 value, unsigned int offset)
+{
+ writel_relaxed(value, emc->channel[channel] + offset);
+}
+
+static inline u32 emc_channel_readl(struct tegra210_emc *emc,
+ unsigned int channel, unsigned int offset)
+{
+ return readl_relaxed(emc->channel[channel] + offset);
+}
+
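+/*
+ * Queue a register update through the CCFIFO: write the data word
+ * first, then program the target offset and stall delay into the
+ * address register, so that the EMC can apply the update later as
+ * part of the clock-change sequence.
+ */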
+static inline void ccfifo_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset, u32 delay)
+{
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_DATA);
+
+ value = EMC_CCFIFO_ADDR_STALL_BY_1 | EMC_CCFIFO_ADDR_STALL(delay) |
+ EMC_CCFIFO_ADDR_OFFSET(offset);
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_ADDR);
+}
+
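+/* Integer division of a by b, rounding the result up (ceiling). */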
+static inline u32 div_o3(u32 a, u32 b)
+{
+ u32 result = a / b;
+
+ if ((b * result) < a)
+ return result + 1;
+
+ return result;
+}
+
+/* from tegra210-emc-r21021.c */
+extern const struct tegra210_emc_sequence tegra210_emc_r21021;
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh);
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address);
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc);
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set);
+void tegra210_emc_timing_update(struct tegra210_emc *emc);
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next);
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate);
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing);
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state);
+unsigned long tegra210_emc_actual_osc_clocks(u32 in);
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset);
+void tegra210_emc_dll_disable(struct tegra210_emc *emc);
+void tegra210_emc_dll_enable(struct tegra210_emc *emc);
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc);
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing);
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc);
+
+#endif
diff --git a/drivers/memory/tegra/tegra210-mc.h b/drivers/memory/tegra/tegra210-mc.h
new file mode 100644
index 000000000000..b9b91ceb4730
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-mc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_MC_H
+#define TEGRA210_MC_H
+
+#include "mc.h"
+
+/* register definitions */
+#define MC_LATENCY_ALLOWANCE_AVPC_0 0x2e4
+#define MC_LATENCY_ALLOWANCE_HC_0 0x310
+#define MC_LATENCY_ALLOWANCE_HC_1 0x314
+#define MC_LATENCY_ALLOWANCE_MPCORE_0 0x320
+#define MC_LATENCY_ALLOWANCE_NVENC_0 0x328
+#define MC_LATENCY_ALLOWANCE_PPCS_0 0x344
+#define MC_LATENCY_ALLOWANCE_PPCS_1 0x348
+#define MC_LATENCY_ALLOWANCE_ISP2_0 0x370
+#define MC_LATENCY_ALLOWANCE_ISP2_1 0x374
+#define MC_LATENCY_ALLOWANCE_XUSB_0 0x37c
+#define MC_LATENCY_ALLOWANCE_XUSB_1 0x380
+#define MC_LATENCY_ALLOWANCE_TSEC_0 0x390
+#define MC_LATENCY_ALLOWANCE_VIC_0 0x394
+#define MC_LATENCY_ALLOWANCE_VI2_0 0x398
+#define MC_LATENCY_ALLOWANCE_GPU_0 0x3ac
+#define MC_LATENCY_ALLOWANCE_SDMMCA_0 0x3b8
+#define MC_LATENCY_ALLOWANCE_SDMMCAA_0 0x3bc
+#define MC_LATENCY_ALLOWANCE_SDMMC_0 0x3c0
+#define MC_LATENCY_ALLOWANCE_SDMMCAB_0 0x3c4
+#define MC_LATENCY_ALLOWANCE_GPU2_0 0x3e8
+#define MC_LATENCY_ALLOWANCE_NVDEC_0 0x3d8
+#define MC_MLL_MPCORER_PTSA_RATE 0x44c
+#define MC_FTOP_PTSA_RATE 0x50c
+#define MC_EMEM_ARB_TIMING_RFCPB 0x6c0
+#define MC_EMEM_ARB_TIMING_CCDMW 0x6c4
+#define MC_EMEM_ARB_REFPB_HP_CTRL 0x6f0
+#define MC_EMEM_ARB_REFPB_BANK_CTRL 0x6f4
+#define MC_PTSA_GRANT_DECREMENT 0x960
+#define MC_EMEM_ARB_DHYST_CTRL 0xbcc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0 0xbd0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1 0xbd4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2 0xbd8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3 0xbdc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4 0xbe0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5 0xbe4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6 0xbe8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7 0xbec
+
+#endif
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index b42bdb667e85..055af0e08a2e 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -11,7 +11,6 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
-#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -327,7 +326,6 @@ struct emc_timing {
struct tegra_emc {
struct device *dev;
struct tegra_mc *mc;
- struct completion clk_handshake_complete;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@ -374,52 +372,10 @@ static int emc_seq_update_timing(struct tegra_emc *emc)
return 0;
}
-static void emc_complete_clk_change(struct tegra_emc *emc)
-{
- struct emc_timing *timing = emc->new_timing;
- unsigned int dram_num;
- bool failed = false;
- int err;
-
- /* re-enable auto-refresh */
- dram_num = tegra_mc_get_emem_device_count(emc->mc);
- writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
- emc->regs + EMC_REFCTRL);
-
- /* restore auto-calibration */
- if (emc->vref_cal_toggle)
- writel_relaxed(timing->emc_auto_cal_interval,
- emc->regs + EMC_AUTO_CAL_INTERVAL);
-
- /* restore dynamic self-refresh */
- if (timing->emc_cfg_dyn_self_ref) {
- emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
- writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
- }
-
- /* set number of clocks to wait after each ZQ command */
- if (emc->zcal_long)
- writel_relaxed(timing->emc_zcal_cnt_long,
- emc->regs + EMC_ZCAL_WAIT_CNT);
-
- /* wait for writes to settle */
- udelay(2);
-
- /* update restored timing */
- err = emc_seq_update_timing(emc);
- if (err)
- failed = true;
-
- /* restore early ACK */
- mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
-
- WRITE_ONCE(emc->bad_state, failed);
-}
-
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 status;
status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
@@ -434,18 +390,6 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
/* clear interrupts */
writel_relaxed(status, emc->regs + EMC_INTSTATUS);
- /* notify about EMC-CAR handshake completion */
- if (status & EMC_CLKCHANGE_COMPLETE_INT) {
- if (completion_done(&emc->clk_handshake_complete)) {
- dev_err_ratelimited(emc->dev,
- "bogus handshake interrupt\n");
- return IRQ_NONE;
- }
-
- emc_complete_clk_change(emc);
- complete(&emc->clk_handshake_complete);
- }
-
return IRQ_HANDLED;
}
@@ -801,29 +745,58 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
*/
mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
- reinit_completion(&emc->clk_handshake_complete);
-
- emc->new_timing = timing;
-
return 0;
}
static int emc_complete_timing_change(struct tegra_emc *emc,
unsigned long rate)
{
- unsigned long timeout;
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ unsigned int dram_num;
+ int err;
+ u32 v;
- timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- msecs_to_jiffies(100));
- if (timeout == 0) {
- dev_err(emc->dev, "emc-car handshake failed\n");
- return -EIO;
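+ /*
+ * Poll the interrupt status directly for the EMC-CAR handshake
+ * completion bit (1 us poll interval, 100 us timeout) instead of
+ * waiting for the interrupt handler.
+ */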
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
}
- if (READ_ONCE(emc->bad_state))
- return -EIO;
+ /* re-enable auto-refresh */
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
- return 0;
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* wait for writes to settle */
+ udelay(2);
+
+ /* update restored timing */
+ err = emc_seq_update_timing(emc);
+ if (!err)
+ emc->bad_state = false;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ return err;
}
static int emc_unprepare_timing_change(struct tegra_emc *emc,
@@ -1033,7 +1006,7 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 fbio_cfg5, emc_cfg, emc_dbg;
enum emc_dram_type dram_type;
@@ -1275,11 +1248,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
@@ -1321,7 +1294,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (!emc->mc)
return -EPROBE_DEFER;
- init_completion(&emc->clk_handshake_complete);
emc->clk_nb.notifier_call = emc_clk_change_notify;
emc->dev = &pdev->dev;
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index db526dbf71ee..159a16f5e7d6 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -27,7 +27,7 @@
#define WSTROBE_SHIFT 20
#define WSETUP_SHIFT 26
#define EW_SHIFT 30
-#define SS_SHIFT 31
+#define SSTROBE_SHIFT 31
#define TA(x) ((x) << TA_SHIFT)
#define RHOLD(x) ((x) << RHOLD_SHIFT)
@@ -37,7 +37,7 @@
#define WSTROBE(x) ((x) << WSTROBE_SHIFT)
#define WSETUP(x) ((x) << WSETUP_SHIFT)
#define EW(x) ((x) << EW_SHIFT)
-#define SS(x) ((x) << SS_SHIFT)
+#define SSTROBE(x) ((x) << SSTROBE_SHIFT)
#define ASIZE_MAX 0x1
#define TA_MAX 0x3
@@ -48,7 +48,7 @@
#define WSTROBE_MAX 0x3f
#define WSETUP_MAX 0xf
#define EW_MAX 0x1
-#define SS_MAX 0x1
+#define SSTROBE_MAX 0x1
#define NUM_CS 4
#define TA_VAL(x) (((x) & TA(TA_MAX)) >> TA_SHIFT)
@@ -59,7 +59,7 @@
#define WSTROBE_VAL(x) (((x) & WSTROBE(WSTROBE_MAX)) >> WSTROBE_SHIFT)
#define WSETUP_VAL(x) (((x) & WSETUP(WSETUP_MAX)) >> WSETUP_SHIFT)
#define EW_VAL(x) (((x) & EW(EW_MAX)) >> EW_SHIFT)
-#define SS_VAL(x) (((x) & SS(SS_MAX)) >> SS_SHIFT)
+#define SSTROBE_VAL(x) (((x) & SSTROBE(SSTROBE_MAX)) >> SSTROBE_SHIFT)
#define NRCSR_OFFSET 0x00
#define AWCCR_OFFSET 0x04
@@ -67,7 +67,7 @@
#define ACR_ASIZE_MASK 0x3
#define ACR_EW_MASK BIT(30)
-#define ACR_SS_MASK BIT(31)
+#define ACR_SSTROBE_MASK BIT(31)
#define ASIZE_16BIT 1
#define CONFIG_MASK (TA(TA_MAX) | \
@@ -77,7 +77,7 @@
WHOLD(WHOLD_MAX) | \
WSTROBE(WSTROBE_MAX) | \
WSETUP(WSETUP_MAX) | \
- EW(EW_MAX) | SS(SS_MAX) | \
+ EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | \
ASIZE_MAX)
/**
@@ -204,7 +204,7 @@ static int aemif_config_abus(struct platform_device *pdev, int csnum)
if (data->enable_ew)
set |= ACR_EW_MASK;
if (data->enable_ss)
- set |= ACR_SS_MASK;
+ set |= ACR_SSTROBE_MASK;
val = readl(aemif->base + offset);
val &= ~CONFIG_MASK;
@@ -246,7 +246,7 @@ static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
data->enable_ew = EW_VAL(val);
- data->enable_ss = SS_VAL(val);
+ data->enable_ss = SSTROBE_VAL(val);
data->asize = val & ASIZE_MAX;
}
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 9c90f815ad3a..6c747c1e98cb 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -248,7 +248,7 @@ MODULE_DEVICE_TABLE(of, ti_emif_of_match);
static int ti_emif_resume(struct device *dev)
{
unsigned long tmp =
- __raw_readl((void *)emif_instance->ti_emif_sram_virt);
+ __raw_readl((void __iomem *)emif_instance->ti_emif_sram_virt);
/*
* Check to see if what we are copying is already present in the
diff --git a/drivers/reset/reset-intel-gw.c b/drivers/reset/reset-intel-gw.c
index 854238444616..effc177db80a 100644
--- a/drivers/reset/reset-intel-gw.c
+++ b/drivers/reset/reset-intel-gw.c
@@ -15,9 +15,9 @@
#define RCU_RST_STAT 0x0024
#define RCU_RST_REQ 0x0048
-#define REG_OFFSET GENMASK(31, 16)
-#define BIT_OFFSET GENMASK(15, 8)
-#define STAT_BIT_OFFSET GENMASK(7, 0)
+#define REG_OFFSET_MASK GENMASK(31, 16)
+#define BIT_OFFSET_MASK GENMASK(15, 8)
+#define STAT_BIT_OFFSET_MASK GENMASK(7, 0)
#define to_reset_data(x) container_of(x, struct intel_reset_data, rcdev)
@@ -51,11 +51,11 @@ static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
unsigned long id, u32 *rst_req,
u32 *req_bit, u32 *stat_bit)
{
- *rst_req = FIELD_GET(REG_OFFSET, id);
- *req_bit = FIELD_GET(BIT_OFFSET, id);
+ *rst_req = FIELD_GET(REG_OFFSET_MASK, id);
+ *req_bit = FIELD_GET(BIT_OFFSET_MASK, id);
if (data->soc_data->legacy)
- *stat_bit = FIELD_GET(STAT_BIT_OFFSET, id);
+ *stat_bit = FIELD_GET(STAT_BIT_OFFSET_MASK, id);
else
*stat_bit = *req_bit;
@@ -141,14 +141,14 @@ static int intel_reset_xlate(struct reset_controller_dev *rcdev,
if (spec->args[1] > 31)
return -EINVAL;
- id = FIELD_PREP(REG_OFFSET, spec->args[0]);
- id |= FIELD_PREP(BIT_OFFSET, spec->args[1]);
+ id = FIELD_PREP(REG_OFFSET_MASK, spec->args[0]);
+ id |= FIELD_PREP(BIT_OFFSET_MASK, spec->args[1]);
if (data->soc_data->legacy) {
if (spec->args[2] > 31)
return -EINVAL;
- id |= FIELD_PREP(STAT_BIT_OFFSET, spec->args[2]);
+ id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, spec->args[2]);
}
return id;
@@ -210,11 +210,11 @@ static int intel_reset_probe(struct platform_device *pdev)
if (ret)
return ret;
- data->reboot_id = FIELD_PREP(REG_OFFSET, rb_id[0]);
- data->reboot_id |= FIELD_PREP(BIT_OFFSET, rb_id[1]);
+ data->reboot_id = FIELD_PREP(REG_OFFSET_MASK, rb_id[0]);
+ data->reboot_id |= FIELD_PREP(BIT_OFFSET_MASK, rb_id[1]);
if (data->soc_data->legacy)
- data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET, rb_id[2]);
+ data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, rb_id[2]);
data->restart_nb.notifier_call = intel_reset_restart_handler;
data->restart_nb.priority = 128;
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 067e7e7b34f1..e066614818a3 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -11,6 +11,7 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -18,10 +19,9 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/reset-simple.h>
#include <linux/spinlock.h>
-#include "reset-simple.h"
-
static inline struct reset_simple_data *
to_reset_simple_data(struct reset_controller_dev *rcdev)
{
@@ -64,6 +64,24 @@ static int reset_simple_deassert(struct reset_controller_dev *rcdev,
return reset_simple_update(rcdev, id, false);
}
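+/*
+ * Pulse reset: assert, wait for the line's configured reset_us pulse
+ * width, then deassert. Controllers that don't set reset_us report
+ * -ENOTSUPP.
+ */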
+static int reset_simple_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct reset_simple_data *data = to_reset_simple_data(rcdev);
+ int ret;
+
+ if (!data->reset_us)
+ return -ENOTSUPP;
+
+ ret = reset_simple_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(data->reset_us, data->reset_us * 2);
+
+ return reset_simple_deassert(rcdev, id);
+}
+
static int reset_simple_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -81,6 +99,7 @@ static int reset_simple_status(struct reset_controller_dev *rcdev,
const struct reset_control_ops reset_simple_ops = {
.assert = reset_simple_assert,
.deassert = reset_simple_deassert,
+ .reset = reset_simple_reset,
.status = reset_simple_status,
};
EXPORT_SYMBOL_GPL(reset_simple_ops);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 96953992c2bb..bdd984296196 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -11,13 +11,12 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/reset-simple.h>
#include <linux/reset/socfpga.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include "reset-simple.h"
-
#define SOCFPGA_NR_BANKS 8
static int a10_reset_init(struct device_node *np)
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index e7f169e57bcf..e752594b6971 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -14,13 +14,12 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/reset-simple.h>
#include <linux/reset/sunxi.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include "reset-simple.h"
-
static int sunxi_reset_init(struct device_node *np)
{
struct reset_simple_data *data;
diff --git a/drivers/reset/reset-ti-sci.c b/drivers/reset/reset-ti-sci.c
index bf68729ab729..b799aefad547 100644
--- a/drivers/reset/reset-ti-sci.c
+++ b/drivers/reset/reset-ti-sci.c
@@ -1,7 +1,7 @@
/*
* Texas Instrument's System Control Interface (TI-SCI) reset driver
*
- * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index a2635c21db7f..ef97c4dbbb4e 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -1,7 +1,7 @@
/*
* TI SYSCON regmap reset driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
* Suman Anna <afd@ti.com>
*
diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
index 2b188b3bb69a..027990b79f61 100644
--- a/drivers/reset/reset-uniphier-glue.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -9,8 +9,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-
-#include "reset-simple.h"
+#include <linux/reset/reset-simple.h>
#define MAX_CLKS 2
#define MAX_RSTS 2
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index d515d2cc20ed..a9370f4aacca 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -8,20 +8,12 @@ config IMX_GPCV2_PM_DOMAINS
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
-config IMX_SCU_SOC
- bool "i.MX System Controller Unit SoC info support"
- depends on IMX_SCU
- select SOC_BUS
- help
- If you say yes here you get support for the NXP i.MX System
- Controller Unit SoC info module, it will provide the SoC info
- like SoC family, ID and revision etc.
-
config SOC_IMX8M
bool "i.MX8M SoC family support"
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
+ select ARM_GIC_V3 if ARCH_MXC
help
If you say yes here you get support for the NXP i.MX8M family
support, it will provide the SoC info like SoC family,
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 446143241fe7..078dc918f4f3 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -5,4 +5,3 @@ endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
-obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 87ee9f767b7a..dc644cfb6419 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -12,6 +12,7 @@
#define CMDQ_WRITE_ENABLE_MASK BIT(0)
#define CMDQ_POLL_ENABLE_MASK BIT(0)
#define CMDQ_EOC_IRQ_EN BIT(0)
+#define CMDQ_REG_TYPE 1
struct cmdq_instruction {
union {
@@ -21,8 +22,17 @@ struct cmdq_instruction {
union {
u16 offset;
u16 event;
+ u16 reg_dst;
+ };
+ union {
+ u8 subsys;
+ struct {
+ u8 sop:5;
+ u8 arg_c_t:1;
+ u8 src_t:1;
+ u8 dst_t:1;
+ };
};
- u8 subsys;
u8 op;
};
@@ -243,6 +253,21 @@ int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
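+/*
+ * Setting CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE in a WFE instruction
+ * makes the GCE update the event value instead of waiting on it, i.e.
+ * it sets the given hardware event.
+ */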
+int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
+{
+ struct cmdq_instruction inst = {};
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_set_event);
+
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value)
{
@@ -278,7 +303,19 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
-static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
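+/*
+ * A CMDQ_CODE_LOGIC instruction with dst_t set to CMDQ_REG_TYPE writes
+ * an immediate value into an internal GCE register (reg_dst) rather
+ * than a subsys address; cmdq_pkt_assign() uses this to load reg_idx.
+ */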
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_LOGIC;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.reg_dst = reg_idx;
+ inst.value = value;
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_assign);
+
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
struct cmdq_instruction inst = { {0} };
int err;
@@ -297,6 +334,7 @@ static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
return err;
}
+EXPORT_SYMBOL(cmdq_pkt_finalize);
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
@@ -331,10 +369,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
unsigned long flags = 0;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
- err = cmdq_pkt_finalize(pkt);
- if (err < 0)
- return err;
-
pkt->cb.cb = cb;
pkt->cb.data = data;
pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 07bb261a63d2..899f8c066797 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -89,7 +89,7 @@ config QCOM_RMTFS_MEM
config QCOM_RPMH
bool "Qualcomm RPM-Hardened (RPMH) Communication"
- depends on ARCH_QCOM && ARM64 || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
help
Support for communication with the hardened-RPM blocks in
Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index bdcf16f88a97..4c9225f15c4e 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -278,13 +278,15 @@ static void pdr_indack_work(struct work_struct *work)
list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
pds = ind->pds;
- pdr_send_indack_msg(pdr, pds, ind->transaction_id);
mutex_lock(&pdr->status_lock);
pds->state = ind->curr_state;
pdr->status(pds->state, pds->service_path, pdr->priv);
mutex_unlock(&pdr->status_lock);
+ /* Ack the indication after clients release the PD resources */
+ pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+
mutex_lock(&pdr->list_lock);
list_del(&ind->node);
mutex_unlock(&pdr->list_lock);
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 7d622ea1274e..d0e4f520cff8 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -3,6 +3,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/console.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -90,8 +91,14 @@ struct geni_wrapper {
struct device *dev;
void __iomem *base;
struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+ struct geni_icc_path to_core;
};
+static const char * const icc_path_names[] = {"qup-core", "qup-config",
+ "qup-memory"};
+
+static struct geni_wrapper *earlycon_wrapper;
+
#define QUP_HW_VER_REG 0x4
/* Common SE registers */
@@ -720,11 +727,132 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
}
EXPORT_SYMBOL(geni_se_rx_dma_unprep);
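+/*
+ * Look up the "qup-core" and "qup-config" ICC paths plus an optional
+ * DDR path named by @icc_ddr; passing a NULL @icc_ddr skips the third
+ * path for serial engines that don't vote on DDR bandwidth.
+ */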
+int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+{
+ int i, err;
+ const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ if (!icc_names[i])
+ continue;
+
+ se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]);
+ if (IS_ERR(se->icc_paths[i].path))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ err = PTR_ERR(se->icc_paths[i].path);
+ if (err != -EPROBE_DEFER)
+ dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n",
+ icc_names[i], err);
+ return err;
+
+}
+
+int geni_icc_set_bw(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_set_bw(se->icc_paths[i].path,
+ se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_set_bw);
+
+void geni_icc_set_tag(struct geni_se *se, u32 tag)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++)
+ icc_set_tag(se->icc_paths[i].path, tag);
+}
+EXPORT_SYMBOL(geni_icc_set_tag);
+
+/* TODO: Replace this with icc_bulk_enable() once it's implemented in the ICC core */
+int geni_icc_enable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_enable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_enable);
+
+int geni_icc_disable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_disable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_disable);
+
+void geni_remove_earlycon_icc_vote(void)
+{
+ struct platform_device *pdev;
+ struct geni_wrapper *wrapper;
+ struct device_node *parent;
+ struct device_node *child;
+
+ if (!earlycon_wrapper)
+ return;
+
+ wrapper = earlycon_wrapper;
+ parent = of_get_next_parent(wrapper->dev->of_node);
+ for_each_child_of_node(parent, child) {
+ if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
+ continue;
+
+ pdev = of_find_device_by_node(child);
+ if (!pdev)
+ continue;
+
+ wrapper = platform_get_drvdata(pdev);
+ icc_put(wrapper->to_core.path);
+ wrapper->to_core.path = NULL;
+ }
+ of_node_put(parent);
+
+ earlycon_wrapper = NULL;
+}
+EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
+
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct geni_wrapper *wrapper;
+ struct console __maybe_unused *bcon;
+ bool __maybe_unused has_earlycon = false;
+ struct device_node __maybe_unused *child;
int ret;
wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
@@ -747,6 +875,43 @@ static int geni_se_probe(struct platform_device *pdev)
}
}
+#ifdef CONFIG_SERIAL_EARLYCON
+ for_each_console(bcon) {
+ if (!strcmp(bcon->name, "qcom_geni")) {
+ has_earlycon = true;
+ break;
+ }
+ }
+ if (!has_earlycon)
+ goto exit;
+
+ wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
+ if (IS_ERR(wrapper->to_core.path))
+ return PTR_ERR(wrapper->to_core.path);
+ /*
+ * Put a minimal BW request on the core clocks on behalf of the early
+ * console. The vote will be removed in the earlycon exit function.
+ *
+ * Note: We put a vote on every QUP wrapper, not only on the one the
+ * earlycon is connected to, because the QUP core clocks of the
+ * different wrappers share the same voltage domain. If core1 is put
+ * to 0, then core2 will also run at 0 if it has no vote of its own.
+ * The default ICC vote is removed as soon as we touch any of the
+ * core clocks:
+ * core1 = core2 = max(core1, core2)
+ */
+ ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
+ GENI_DEFAULT_BW);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * of_get_compatible_child() returns the child with a reference
+ * held, so put that reference; the device's own of_node was never
+ * gotten here and must not be put.
+ */
+ child = of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart");
+ if (child) {
+ earlycon_wrapper = wrapper;
+ of_node_put(child);
+ }
+exit:
+#endif
dev_set_drvdata(dev, wrapper);
dev_dbg(dev, "GENI SE Driver probed\n");
return devm_of_platform_populate(dev);
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 076fd27f3081..ae6675782581 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -175,13 +175,21 @@ static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
u32 data)
{
- u32 new_data;
+ int i;
writel(data, tcs_reg_addr(drv, reg, tcs_id));
- if (readl_poll_timeout_atomic(tcs_reg_addr(drv, reg, tcs_id), new_data,
- new_data == data, 1, USEC_PER_SEC))
- pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
- data, tcs_id, reg);
+
+ /*
+ * Wait until we read back the same value. Use a counter rather than
+ * ktime for timeout since this may be called after timekeeping stops.
+ */
+ for (i = 0; i < USEC_PER_SEC; i++) {
+ if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
+ return;
+ udelay(1);
+ }
+ pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
+ data, tcs_id, reg);
}
/**
@@ -1023,6 +1031,7 @@ static struct platform_driver rpmh_driver = {
.driver = {
.name = "rpmh",
.of_match_table = rpmh_drv_match,
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index f2b5b46ccd1f..b61e183ede69 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -497,7 +497,7 @@ exit:
*
* Invalidate the sleep and wake values in batch_cache.
*/
-int rpmh_invalidate(const struct device *dev)
+void rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
struct batch_cache_req *req, *tmp;
@@ -509,7 +509,5 @@ int rpmh_invalidate(const struct device *dev)
INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-
- return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 005dd30c58fa..b93218cb50b5 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -20,6 +20,7 @@
* struct qcom_smd_rpm - state of the rpm device driver
* @rpm_channel: reference to the smd channel
* @icc: interconnect proxy device
+ * @dev: rpm device
* @ack: completion for acks
* @lock: mutual exclusion around the send/complete pair
* @ack_status: result of the rpm request
@@ -86,6 +87,7 @@ struct qcom_rpm_message {
/**
* qcom_rpm_smd_write - write @buf to @type:@id
* @rpm: rpm handle
+ * @state: active/sleep state flags
* @type: resource type
* @id: resource identifier
* @buf: the data to be written
@@ -230,9 +232,12 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
+ { .compatible = "qcom,rpm-ipq6018" },
{ .compatible = "qcom,rpm-msm8916" },
+ { .compatible = "qcom,rpm-msm8936" },
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8976" },
+ { .compatible = "qcom,rpm-msm8994" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
{ .compatible = "qcom,rpm-sdm660" },
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 5983c6ffb078..e19102f46302 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -24,6 +24,7 @@
#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
/*
* SMEM item id, used to acquire handles to respective
@@ -121,6 +122,16 @@ struct socinfo {
__le32 chip_family;
__le32 raw_device_family;
__le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_defective_parts;
+ __le32 ndefective_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
};
#ifdef CONFIG_DEBUG_FS
@@ -135,6 +146,12 @@ struct socinfo_params {
u32 raw_ver;
u32 hw_plat;
u32 fmt;
+ u32 nproduct_id;
+ u32 num_clusters;
+ u32 ncluster_array_offset;
+ u32 num_defective_parts;
+ u32 ndefective_parts_array_offset;
+ u32 nmodem_supported;
};
struct smem_image_version {
@@ -202,8 +219,10 @@ static const struct soc_id soc_id[] = {
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
+ { 318, "SDM630" },
{ 321, "SDM845" },
{ 341, "SDA845" },
+ { 356, "SM8250" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
@@ -256,7 +275,10 @@ static int qcom_show_pmic_model(struct seq_file *seq, void *p)
if (model < 0)
return -EINVAL;
- seq_printf(seq, "%s\n", pmic_models[model]);
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s\n", pmic_models[model]);
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
return 0;
}
@@ -272,9 +294,19 @@ static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p)
return 0;
}
+static int qcom_show_chip_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->chip_id);
+
+ return 0;
+}
+
QCOM_OPEN(build_id, qcom_show_build_id);
QCOM_OPEN(pmic_model, qcom_show_pmic_model);
QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
+QCOM_OPEN(chip_id, qcom_show_chip_id);
#define DEFINE_IMAGE_OPS(type) \
static int show_image_##type(struct seq_file *seq, void *p) \
@@ -312,7 +344,38 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
+ debugfs_create_x32("info_fmt", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.fmt);
+
switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 15):
+ qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
+
+ debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nmodem_supported);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 14):
+ qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
+ qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
+ qcom_socinfo->info.num_defective_parts = __le32_to_cpu(info->num_defective_parts);
+ qcom_socinfo->info.ndefective_parts_array_offset = __le32_to_cpu(info->ndefective_parts_array_offset);
+
+ debugfs_create_u32("num_clusters", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_clusters);
+ debugfs_create_u32("ncluster_array_offset", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ncluster_array_offset);
+ debugfs_create_u32("num_defective_parts", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_defective_parts);
+ debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ndefective_parts_array_offset);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 13):
+ qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
+
+ debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nproduct_id);
+ DEBUGFS_ADD(info, chip_id);
+ /* Fall through */
case SOCINFO_VERSION(0, 12):
qcom_socinfo->info.chip_family =
__le32_to_cpu(info->chip_family);
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 53cd8d2d0cd2..30984659df90 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -201,6 +201,13 @@ config ARCH_R8A774C0
help
This enables support for the Renesas RZ/G2E SoC.
+config ARCH_R8A774E1
+ bool "Renesas RZ/G2H SoC Platform"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774E1
+ help
+ This enables support for the Renesas RZ/G2H SoC.
+
config ARCH_R8A77950
bool "Renesas R-Car H3 ES1.x SoC Platform"
select ARCH_RCAR_GEN3
@@ -296,6 +303,10 @@ config SYSC_R8A774C0
bool "RZ/G2E System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A774E1
+ bool "RZ/G2H System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A7779
bool "R-Car H1 System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 08296d78e2ad..10a399fc486a 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SYSC_R8A77470) += r8a77470-sysc.o
obj-$(CONFIG_SYSC_R8A774A1) += r8a774a1-sysc.o
obj-$(CONFIG_SYSC_R8A774B1) += r8a774b1-sysc.o
obj-$(CONFIG_SYSC_R8A774C0) += r8a774c0-sysc.o
+obj-$(CONFIG_SYSC_R8A774E1) += r8a774e1-sysc.o
obj-$(CONFIG_SYSC_R8A7779) += r8a7779-sysc.o
obj-$(CONFIG_SYSC_R8A7790) += r8a7790-sysc.o
obj-$(CONFIG_SYSC_R8A7791) += r8a7791-sysc.o
diff --git a/drivers/soc/renesas/r8a774e1-sysc.c b/drivers/soc/renesas/r8a774e1-sysc.c
new file mode 100644
index 000000000000..18449f746455
--- /dev/null
+++ b/drivers/soc/renesas/r8a774e1-sysc.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2H System Controller
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ *
+ * Based on Renesas R-Car H3 System Controller
+ * Copyright (C) 2016-2017 Glider bvba
+ */
+
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a774e1-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a774e1_areas[] __initconst = {
+ { "always-on", 0, 0, R8A774E1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A774E1_PD_CA57_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A774E1_PD_CA57_CPU0, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A774E1_PD_CA57_CPU1, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu2", 0x80, 2, R8A774E1_PD_CA57_CPU2, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu3", 0x80, 3, R8A774E1_PD_CA57_CPU3, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A774E1_PD_CA53_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A774E1_PD_CA53_CPU0, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A774E1_PD_CA53_CPU1, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A774E1_PD_CA53_CPU2, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A774E1_PD_CA53_CPU3, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "a3vp", 0x340, 0, R8A774E1_PD_A3VP, R8A774E1_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A774E1_PD_A3VC, R8A774E1_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A774E1_PD_A2VC1, R8A774E1_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A774E1_PD_3DG_A, R8A774E1_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A774E1_PD_3DG_B, R8A774E1_PD_3DG_A },
+ { "3dg-c", 0x100, 2, R8A774E1_PD_3DG_C, R8A774E1_PD_3DG_B },
+ { "3dg-d", 0x100, 3, R8A774E1_PD_3DG_D, R8A774E1_PD_3DG_C },
+ { "3dg-e", 0x100, 4, R8A774E1_PD_3DG_E, R8A774E1_PD_3DG_D },
+};
+
+const struct rcar_sysc_info r8a774e1_sysc_info __initconst = {
+ .areas = r8a774e1_areas,
+ .num_areas = ARRAY_SIZE(r8a774e1_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index a2b2b1768768..a932015ce9c1 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -48,6 +48,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a774a1-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a774b1-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a774c0-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774e1-rst", .data = &rcar_rst_gen3 },
/* R-Car Gen1 */
{ .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 },
{ .compatible = "renesas,r8a7779-reset-wdt", .data = &rcar_rst_gen1 },
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 04ea87a188f1..9b235fc90027 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -296,6 +296,9 @@ static const struct of_device_id rcar_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A774C0
{ .compatible = "renesas,r8a774c0-sysc", .data = &r8a774c0_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A774E1
+ { .compatible = "renesas,r8a774e1-sysc", .data = &r8a774e1_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index e417f26fe155..8d861c1cfdf7 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -56,6 +56,7 @@ extern const struct rcar_sysc_info r8a77470_sysc_info;
extern const struct rcar_sysc_info r8a774a1_sysc_info;
extern const struct rcar_sysc_info r8a774b1_sysc_info;
extern const struct rcar_sysc_info r8a774c0_sysc_info;
+extern const struct rcar_sysc_info r8a774e1_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 35dba8b8814e..f815a6a8b88b 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -126,6 +126,11 @@ static const struct renesas_soc soc_rz_g2e __initconst __maybe_unused = {
.id = 0x57,
};
+static const struct renesas_soc soc_rz_g2h __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x4f,
+};
+
static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
.family = &fam_rcar_gen1,
};
@@ -238,6 +243,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A774C0
{ .compatible = "renesas,r8a774c0", .data = &soc_rz_g2e },
#endif
+#ifdef CONFIG_ARCH_R8A774E1
+ { .compatible = "renesas,r8a774e1", .data = &soc_rz_g2h },
+#endif
#ifdef CONFIG_ARCH_R8A7778
{ .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
#endif
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index c7a2003687c7..264185664594 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -37,4 +37,7 @@ config EXYNOS_PM_DOMAINS
bool "Exynos PM domains" if COMPILE_TEST
depends on PM_GENERIC_DOMAINS || COMPILE_TEST
+config EXYNOS_REGULATOR_COUPLER
+ bool "Exynos SoC Regulator Coupler" if COMPILE_TEST
+ depends on ARCH_EXYNOS || COMPILE_TEST
endif
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index edd1d6ea064d..ecc3a32f6406 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
+obj-$(CONFIG_EXYNOS_REGULATOR_COUPLER) += exynos-regulator-coupler.o
diff --git a/drivers/soc/samsung/exynos-regulator-coupler.c b/drivers/soc/samsung/exynos-regulator-coupler.c
new file mode 100644
index 000000000000..61a156b44a48
--- /dev/null
+++ b/drivers/soc/samsung/exynos-regulator-coupler.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * A simplified version of the generic voltage coupler from the
+ * regulator core (core.c). The main difference is that it keeps the
+ * current regulator voltage if the consumers haven't applied their
+ * constraints yet.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
+ int *current_uV,
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
+{
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ struct regulator_dev **c_rdevs = c_desc->coupled_rdevs;
+ struct regulation_constraints *constraints = rdev->constraints;
+ int desired_min_uV = 0, desired_max_uV = INT_MAX;
+ int max_current_uV = 0, min_current_uV = INT_MAX;
+ int highest_min_uV = 0, target_uV, possible_uV;
+ int i, ret, max_spread, n_coupled = c_desc->n_coupled;
+ bool done;
+
+ *current_uV = -1;
+
+ /* Find highest min desired voltage */
+ for (i = 0; i < n_coupled; i++) {
+ int tmp_min = 0;
+ int tmp_max = INT_MAX;
+
+ lockdep_assert_held_once(&c_rdevs[i]->mutex.base);
+
+ ret = regulator_check_consumers(c_rdevs[i],
+ &tmp_min,
+ &tmp_max, state);
+ if (ret < 0)
+ return ret;
+
+ if (tmp_min == 0) {
+ ret = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (ret < 0)
+ return ret;
+ tmp_min = ret;
+ }
+
+ /* apply constraints */
+ ret = regulator_check_voltage(c_rdevs[i], &tmp_min, &tmp_max);
+ if (ret < 0)
+ return ret;
+
+ highest_min_uV = max(highest_min_uV, tmp_min);
+
+ if (i == 0) {
+ desired_min_uV = tmp_min;
+ desired_max_uV = tmp_max;
+ }
+ }
+
+ max_spread = constraints->max_spread[0];
+
+ /*
+ * Let target_uV be equal to the desired one if possible.
+ * If not, set it to minimum voltage, allowed by other coupled
+ * regulators.
+ */
+ target_uV = max(desired_min_uV, highest_min_uV - max_spread);
+
+ /*
+ * Find the min and max voltages that don't currently violate
+ * max_spread.
+ */
+ for (i = 1; i < n_coupled; i++) {
+ int tmp_act;
+
+ tmp_act = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (tmp_act < 0)
+ return tmp_act;
+
+ min_current_uV = min(tmp_act, min_current_uV);
+ max_current_uV = max(tmp_act, max_current_uV);
+ }
+
+ /*
+ * Correct the target voltage so that it doesn't currently
+ * violate max_spread.
+ */
+ possible_uV = max(target_uV, max_current_uV - max_spread);
+ possible_uV = min(possible_uV, min_current_uV + max_spread);
+
+ if (possible_uV > desired_max_uV)
+ return -EINVAL;
+
+ done = (possible_uV == target_uV);
+ desired_min_uV = possible_uV;
+
+ /* Set current_uV if it wasn't set earlier and is needed */
+ if (*current_uV == -1) {
+ ret = regulator_get_voltage_rdev(rdev);
+ if (ret < 0)
+ return ret;
+ *current_uV = ret;
+ }
+
+ *min_uV = desired_min_uV;
+ *max_uV = desired_max_uV;
+
+ return done;
+}
+
+static int exynos_coupler_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator_dev **c_rdevs;
+ struct regulator_dev *best_rdev;
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev;
+ unsigned int delta, best_delta;
+ unsigned long c_rdev_done = 0;
+ bool best_c_rdev_done;
+
+ c_rdevs = c_desc->coupled_rdevs;
+ n_coupled = c_desc->n_coupled;
+
+ /*
+ * Find the best possible voltage change on each loop. Leave the loop
+ * if there isn't any possible change.
+ */
+ do {
+ best_c_rdev_done = false;
+ best_delta = 0;
+ best_min_uV = 0;
+ best_max_uV = 0;
+ best_c_rdev = 0;
+ best_rdev = NULL;
+
+ /*
+ * Find the highest difference between the optimal voltage
+ * and the current voltage.
+ */
+ for (i = 0; i < n_coupled; i++) {
+ /*
+ * optimal_uV is the best voltage that can be set for the
+ * i-th regulator at the moment without violating the
+ * max_spread constraint, in order to balance the
+ * coupled voltages.
+ */
+ int optimal_uV = 0, optimal_max_uV = 0, current_uV = 0;
+
+ if (test_bit(i, &c_rdev_done))
+ continue;
+
+ ret = regulator_get_optimal_voltage(c_rdevs[i],
+ &current_uV,
+ &optimal_uV,
+ &optimal_max_uV,
+ state);
+ if (ret < 0)
+ goto out;
+
+ delta = abs(optimal_uV - current_uV);
+
+ if (delta && best_delta <= delta) {
+ best_c_rdev_done = ret;
+ best_delta = delta;
+ best_rdev = c_rdevs[i];
+ best_min_uV = optimal_uV;
+ best_max_uV = optimal_max_uV;
+ best_c_rdev = i;
+ }
+ }
+
+ /* Nothing to change, return successfully */
+ if (!best_rdev) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = regulator_set_voltage_rdev(best_rdev, best_min_uV,
+ best_max_uV, state);
+
+ if (ret < 0)
+ goto out;
+
+ if (best_c_rdev_done)
+ set_bit(best_c_rdev, &c_rdev_done);
+
+ } while (n_coupled > 1);
+
+out:
+ return ret;
+}
+
+static int exynos_coupler_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static struct regulator_coupler exynos_coupler = {
+ .attach_regulator = exynos_coupler_attach,
+ .balance_voltage = exynos_coupler_balance_voltage,
+};
+
+static int __init exynos_coupler_init(void)
+{
+ if (!of_machine_is_compatible("samsung,exynos5800"))
+ return 0;
+
+ return regulator_coupler_register(&exynos_coupler);
+}
+arch_initcall(exynos_coupler_init);
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 3cdd69d1bd4d..8e416ad91ee2 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -27,7 +27,7 @@ static u32 chipid;
u32 tegra_read_chipid(void)
{
- WARN(!chipid, "Tegra ABP MISC not yet available\n");
+ WARN(!chipid, "Tegra APB MISC not yet available\n");
return chipid;
}
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 5fb2ee2ac978..6dcc21dde0cb 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -109,6 +109,21 @@ struct k3_ring_ops {
};
/**
+ * struct k3_ring_state - Internal state tracking structure
+ *
+ * @free: Number of free entries
+ * @occ: Occupancy
+ * @windex: Write index
+ * @rindex: Read index
+ */
+struct k3_ring_state {
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+};
+
+/**
* struct k3_ring - RA Ring descriptor
*
* @rt: Ring control/status registers
@@ -121,10 +136,6 @@ struct k3_ring_ops {
* @elm_size: Size of the ring element
* @mode: Ring mode
* @flags: flags
- * @free: Number of free elements
- * @occ: Ring occupancy
- * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
- * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
* @ring_id: Ring Id
* @parent: Pointer on struct @k3_ringacc
* @use_count: Use count for shared rings
@@ -143,16 +154,17 @@ struct k3_ring {
u32 flags;
#define K3_RING_FLAG_BUSY BIT(1)
#define K3_RING_FLAG_SHARED BIT(2)
- u32 free;
- u32 occ;
- u32 windex;
- u32 rindex;
+ struct k3_ring_state state;
u32 ring_id;
struct k3_ringacc *parent;
u32 use_count;
int proxy_id;
};
+struct k3_ringacc_ops {
+ int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
+};
+
/**
* struct k3_ringacc - Rings accelerator descriptor
*
@@ -171,6 +183,7 @@ struct k3_ring {
* @tisci: pointer ti-sci handle
* @tisci_ring_ops: ti-sci rings ops
* @tisci_dev_id: ti-sci device id
+ * @ops: SoC specific ringacc operation
*/
struct k3_ringacc {
struct device *dev;
@@ -191,6 +204,8 @@ struct k3_ringacc {
const struct ti_sci_handle *tisci;
const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
u32 tisci_dev_id;
+
+ const struct k3_ringacc_ops *ops;
};
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
@@ -245,6 +260,7 @@ static void k3_ringacc_ring_dump(struct k3_ring *ring)
&ring->ring_mem_dma);
dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+ dev_dbg(dev, "dump flags %08X\n", ring->flags);
dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
@@ -313,6 +329,30 @@ error:
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ if (!fwd_ring || !compl_ring)
+ return -EINVAL;
+
+ *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
+ if (!(*fwd_ring))
+ return -ENODEV;
+
+ *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
+ if (!(*compl_ring)) {
+ k3_ringacc_ring_free(*fwd_ring);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
+
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
struct k3_ringacc *ringacc = ring->parent;
@@ -339,10 +379,7 @@ void k3_ringacc_ring_reset(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return;
- ring->occ = 0;
- ring->free = 0;
- ring->rindex = 0;
- ring->windex = 0;
+ memset(&ring->state, 0, sizeof(ring->state));
k3_ringacc_ring_reset_sci(ring);
}
@@ -556,11 +593,13 @@ static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
- struct k3_ringacc *ringacc = ring->parent;
+ struct k3_ringacc *ringacc;
int ret = 0;
if (!ring || !cfg)
return -EINVAL;
+ ringacc = ring->parent;
+
if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
@@ -590,10 +629,7 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
ring->size = cfg->size;
ring->elm_size = cfg->elm_size;
ring->mode = cfg->mode;
- ring->occ = 0;
- ring->free = 0;
- ring->rindex = 0;
- ring->windex = 0;
+ memset(&ring->state, 0, sizeof(ring->state));
if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
ring->proxy = ringacc->proxy_target_base +
@@ -613,7 +649,7 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
ring->ops = NULL;
ret = -EINVAL;
goto err_free_proxy;
- };
+ }
ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
ring->size * (4 << ring->elm_size),
@@ -664,10 +700,10 @@ u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- if (!ring->free)
- ring->free = ring->size - readl(&ring->rt->occ);
+ if (!ring->state.free)
+ ring->state.free = ring->size - readl(&ring->rt->occ);
- return ring->free;
+ return ring->state.free;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
@@ -738,7 +774,7 @@ static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_fromio(elem, ptr, (4 << ring->elm_size));
- ring->occ--;
+ ring->state.occ--;
break;
case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
@@ -746,14 +782,14 @@ static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_toio(ptr, elem, (4 << ring->elm_size));
- ring->free--;
+ ring->state.free--;
break;
default:
return -EINVAL;
}
- dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
- ring->occ);
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
+ ring->state.occ);
return 0;
}
@@ -808,7 +844,7 @@ static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_fromio(elem, ptr, (4 << ring->elm_size));
- ring->occ--;
+ ring->state.occ--;
break;
case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
@@ -816,14 +852,15 @@ static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_toio(ptr, elem, (4 << ring->elm_size));
- ring->free--;
+ ring->state.free--;
break;
default:
return -EINVAL;
}
- dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
- ring->windex, ring->occ, ring->rindex);
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
+ ring->state.free, ring->state.windex, ring->state.occ,
+ ring->state.rindex);
return 0;
}
@@ -855,16 +892,16 @@ static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
- elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
memcpy(elem_ptr, elem, (4 << ring->elm_size));
- ring->windex = (ring->windex + 1) % ring->size;
- ring->free--;
+ ring->state.windex = (ring->state.windex + 1) % ring->size;
+ ring->state.free--;
writel(1, &ring->rt->db);
dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
- ring->free, ring->windex);
+ ring->state.free, ring->state.windex);
return 0;
}
@@ -873,16 +910,16 @@ static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
- elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
memcpy(elem, elem_ptr, (4 << ring->elm_size));
- ring->rindex = (ring->rindex + 1) % ring->size;
- ring->occ--;
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
writel(-1, &ring->rt->db);
dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
- ring->occ, ring->rindex, elem_ptr);
+ ring->state.occ, ring->state.rindex, elem_ptr);
return 0;
}
@@ -893,8 +930,8 @@ int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
- ring->windex);
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
+ ring->state.free, ring->state.windex);
if (k3_ringacc_ring_is_full(ring))
return -ENOMEM;
@@ -914,7 +951,7 @@ int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
return -EINVAL;
dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
- ring->free, ring->windex);
+ ring->state.free, ring->state.windex);
if (k3_ringacc_ring_is_full(ring))
return -ENOMEM;
@@ -933,13 +970,13 @@ int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- if (!ring->occ)
- ring->occ = k3_ringacc_ring_get_occ(ring);
+ if (!ring->state.occ)
+ ring->state.occ = k3_ringacc_ring_get_occ(ring);
- dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
- ring->rindex);
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
+ ring->state.rindex);
- if (!ring->occ)
+ if (!ring->state.occ)
return -ENODATA;
if (ring->ops && ring->ops->pop_head)
@@ -956,13 +993,13 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- if (!ring->occ)
- ring->occ = k3_ringacc_ring_get_occ(ring);
+ if (!ring->state.occ)
+ ring->state.occ = k3_ringacc_ring_get_occ(ring);
- dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
- ring->rindex);
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
+ ring->state.occ, ring->state.rindex);
- if (!ring->occ)
+ if (!ring->state.occ)
return -ENODATA;
if (ring->ops && ring->ops->pop_tail)
@@ -1047,21 +1084,14 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
ringacc->rm_gp_range);
}
-static int k3_ringacc_probe(struct platform_device *pdev)
+static int k3_ringacc_init(struct platform_device *pdev,
+ struct k3_ringacc *ringacc)
{
- struct k3_ringacc *ringacc;
void __iomem *base_fifo, *base_rt;
struct device *dev = &pdev->dev;
struct resource *res;
int ret, i;
- ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
- if (!ringacc)
- return -ENOMEM;
-
- ringacc->dev = dev;
- mutex_init(&ringacc->req_lock);
-
dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi_domain) {
@@ -1120,14 +1150,9 @@ static int k3_ringacc_probe(struct platform_device *pdev)
ringacc->rings[i].ring_id = i;
ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
}
- dev_set_drvdata(dev, ringacc);
ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
- mutex_lock(&k3_ringacc_list_lock);
- list_add_tail(&ringacc->list, &k3_ringacc_list);
- mutex_unlock(&k3_ringacc_list_lock);
-
dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
ringacc->num_rings,
ringacc->rm_gp_range->desc[0].start,
@@ -1137,15 +1162,60 @@ static int k3_ringacc_probe(struct platform_device *pdev)
ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+
return 0;
}
+struct ringacc_match_data {
+ struct k3_ringacc_ops ops;
+};
+
+static struct ringacc_match_data k3_ringacc_data = {
+ .ops = {
+ .init = k3_ringacc_init,
+ },
+};
+
/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
- { .compatible = "ti,am654-navss-ringacc", },
+ { .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
{},
};
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ const struct ringacc_match_data *match_data;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ int ret;
+
+ match = of_match_node(k3_ringacc_of_match, dev->of_node);
+ if (!match)
+ return -ENODEV;
+ match_data = match->data;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+ ringacc->ops = &match_data->ops;
+
+ ret = ringacc->ops->init(pdev, ringacc);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, ringacc);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ return 0;
+}
+
static struct platform_driver k3_ringacc_driver = {
.probe = k3_ringacc_probe,
.driver = {
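
k3_ringacc_request_rings_pair() packages the common forward/completion
request-and-rollback sequence for callers such as the k3-udma glue layer:
if the completion ring cannot be obtained, the forward ring is freed before
returning. A hedged caller sketch (K3_RINGACC_RING_ID_ANY comes from the
public header and lets the driver pick any free general-purpose ring):

    #include <linux/soc/ti/k3-ringacc.h>

    static int demo_setup_rings(struct k3_ringacc *ringacc,
                                struct k3_ring **fwd_ring,
                                struct k3_ring **compl_ring)
    {
            int ret;

            /* K3_RINGACC_RING_ID_ANY (-1) asks for any free GP ring. */
            ret = k3_ringacc_request_rings_pair(ringacc,
                                                K3_RINGACC_RING_ID_ANY,
                                                K3_RINGACC_RING_ID_ANY,
                                                fwd_ring, compl_ring);
            if (ret)
                    return ret;     /* helper already rolled back */

            /* On success the caller owns both rings and must free them. */
            return 0;
    }
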
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
index 1762d89fc05d..fde66e28e046 100644
--- a/drivers/soc/ti/knav_qmss_acc.c
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -450,7 +450,7 @@ static int knav_acc_free_range(struct knav_range_info *range)
return 0;
}
-struct knav_range_ops knav_acc_range_ops = {
+static struct knav_range_ops knav_acc_range_ops = {
.set_notify = knav_acc_set_notify,
.init_queue = knav_acc_init_queue,
.open_queue = knav_acc_open_queue,
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
index d64feeb51a40..a9472e0e5d61 100644
--- a/drivers/soc/ux500/ux500-soc-id.c
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -146,9 +146,8 @@ static const char * __init ux500_get_revision(void)
return kasprintf(GFP_KERNEL, "%s", "Unknown");
}
-static ssize_t ux500_get_process(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+process_show(struct device *dev, struct device_attribute *attr, char *buf)
{
if (dbx500_id.process == 0x00)
return sprintf(buf, "Standard\n");
@@ -156,6 +155,15 @@ static ssize_t ux500_get_process(struct device *dev,
return sprintf(buf, "%02xnm\n", dbx500_id.process);
}
+static DEVICE_ATTR_RO(process);
+
+static struct attribute *ux500_soc_attrs[] = {
+ &dev_attr_process.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(ux500_soc);
+
static const char *db8500_read_soc_id(struct device_node *backupram)
{
void __iomem *base;
@@ -184,14 +192,11 @@ static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
soc_dev_attr->machine = ux500_get_machine();
soc_dev_attr->family = ux500_get_family();
soc_dev_attr->revision = ux500_get_revision();
+ soc_dev_attr->custom_attr_group = ux500_soc_groups[0];
}
-static const struct device_attribute ux500_soc_attr =
- __ATTR(process, S_IRUGO, ux500_get_process, NULL);
-
static int __init ux500_soc_device_init(void)
{
- struct device *parent;
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
struct device_node *backupram;
@@ -217,9 +222,6 @@ static int __init ux500_soc_device_init(void)
return PTR_ERR(soc_dev);
}
- parent = soc_device_to_device(soc_dev);
- device_create_file(parent, &ux500_soc_attr);
-
return 0;
}
subsys_initcall(ux500_soc_device_init);
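
The ux500 change above, and the two Versatile drivers below, all apply one
conversion: a _show() helper named after the attribute plus DEVICE_ATTR_RO(),
an attribute array wrapped by ATTRIBUTE_GROUPS(), and the resulting group
handed to the SoC core through soc_dev_attr->custom_attr_group, replacing the
racy post-registration device_create_file() calls. Condensed into a sketch
(all demo_* names are hypothetical):

    #include <linux/device.h>
    #include <linux/sys_soc.h>

    static ssize_t
    demo_show(struct device *dev, struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "demo\n");
    }
    static DEVICE_ATTR_RO(demo);            /* emits dev_attr_demo */

    static struct attribute *demo_soc_attrs[] = {
            &dev_attr_demo.attr,
            NULL
    };
    ATTRIBUTE_GROUPS(demo_soc);             /* emits demo_soc_groups[] */

    static void demo_populate(struct soc_device_attribute *soc_dev_attr)
    {
            /* Group is registered together with the soc device itself. */
            soc_dev_attr->custom_attr_group = demo_soc_groups[0];
    }
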
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
index ae13fa2aa582..7dcf77ccd31e 100644
--- a/drivers/soc/versatile/soc-integrator.c
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -56,45 +56,47 @@ static const char *integrator_fpga_str(u32 id)
}
}
-static ssize_t integrator_get_manf(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", integrator_coreid >> 24);
}
-static struct device_attribute integrator_manf_attr =
- __ATTR(manufacturer, S_IRUGO, integrator_get_manf, NULL);
+static DEVICE_ATTR_RO(manufacturer);
-static ssize_t integrator_get_arch(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+arch_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", integrator_arch_str(integrator_coreid));
}
-static struct device_attribute integrator_arch_attr =
- __ATTR(arch, S_IRUGO, integrator_get_arch, NULL);
+static DEVICE_ATTR_RO(arch);
-static ssize_t integrator_get_fpga(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", integrator_fpga_str(integrator_coreid));
}
-static struct device_attribute integrator_fpga_attr =
- __ATTR(fpga, S_IRUGO, integrator_get_fpga, NULL);
+static DEVICE_ATTR_RO(fpga);
-static ssize_t integrator_get_build(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", (integrator_coreid >> 4) & 0xFF);
}
-static struct device_attribute integrator_build_attr =
- __ATTR(build, S_IRUGO, integrator_get_build, NULL);
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *integrator_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_arch.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(integrator);
static int __init integrator_soc_init(void)
{
@@ -127,6 +129,7 @@ static int __init integrator_soc_init(void)
soc_dev_attr->soc_id = "Integrator";
soc_dev_attr->machine = "Integrator";
soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = integrator_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
@@ -134,11 +137,6 @@ static int __init integrator_soc_init(void)
}
dev = soc_device_to_device(soc_dev);
- device_create_file(dev, &integrator_manf_attr);
- device_create_file(dev, &integrator_arch_attr);
- device_create_file(dev, &integrator_fpga_attr);
- device_create_file(dev, &integrator_build_attr);
-
dev_info(dev, "Detected ARM core module:\n");
dev_info(dev, " Manufacturer: %02x\n", (val >> 24));
dev_info(dev, " Architecture: %s\n", integrator_arch_str(val));
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index 9471353dd8c3..c6876d232d8f 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -39,45 +39,47 @@ static const char *realview_arch_str(u32 id)
}
}
-static ssize_t realview_get_manf(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", realview_coreid >> 24);
}
-static struct device_attribute realview_manf_attr =
- __ATTR(manufacturer, S_IRUGO, realview_get_manf, NULL);
+static DEVICE_ATTR_RO(manufacturer);
-static ssize_t realview_get_board(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+board_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "HBI-%03x\n", ((realview_coreid >> 16) & 0xfff));
}
-static struct device_attribute realview_board_attr =
- __ATTR(board, S_IRUGO, realview_get_board, NULL);
+static DEVICE_ATTR_RO(board);
-static ssize_t realview_get_arch(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", realview_arch_str(realview_coreid));
}
-static struct device_attribute realview_arch_attr =
- __ATTR(fpga, S_IRUGO, realview_get_arch, NULL);
+static DEVICE_ATTR_RO(fpga);
-static ssize_t realview_get_build(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", (realview_coreid & 0xFF));
}
-static struct device_attribute realview_build_attr =
- __ATTR(build, S_IRUGO, realview_get_build, NULL);
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *realview_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_board.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(realview);
static int realview_soc_probe(struct platform_device *pdev)
{
@@ -102,6 +104,7 @@ static int realview_soc_probe(struct platform_device *pdev)
soc_dev_attr->machine = "RealView";
soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = realview_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
@@ -112,11 +115,6 @@ static int realview_soc_probe(struct platform_device *pdev)
if (ret)
return -ENODEV;
- device_create_file(soc_device_to_device(soc_dev), &realview_manf_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_board_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_arch_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_build_attr);
-
dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x, HBI-%03x\n",
realview_coreid,
((realview_coreid >> 16) & 0xfff));
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index c3972424af71..7a88cc036004 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -7,6 +7,7 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
@@ -76,7 +77,9 @@ struct spi_geni_master {
u32 tx_fifo_depth;
u32 fifo_width_bits;
u32 tx_wm;
+ u32 last_mode;
unsigned long cur_speed_hz;
+ unsigned long cur_sclk_hz;
unsigned int cur_bits_per_word;
unsigned int tx_rem_bytes;
unsigned int rx_rem_bytes;
@@ -95,7 +98,6 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
{
unsigned long sclk_freq;
unsigned int actual_hz;
- struct geni_se *se = &mas->se;
int ret;
ret = geni_se_clk_freq_match(&mas->se,
@@ -112,9 +114,12 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
actual_hz, sclk_freq, *clk_idx, *clk_div);
- ret = clk_set_rate(se->clk, sclk_freq);
+ ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
if (ret)
- dev_err(mas->dev, "clk_set_rate failed %d\n", ret);
+ dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
+ else
+ mas->cur_sclk_hz = sclk_freq;
+
return ret;
}
@@ -177,8 +182,6 @@ static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
struct geni_se *se = &mas->se;
u32 word_len;
- word_len = readl(se->base + SE_SPI_WORD_LEN);
-
/*
* If bits_per_word isn't a byte aligned value, set the packing to be
* 1 SPI word per FIFO word.
@@ -187,74 +190,94 @@ static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
pack_words = mas->fifo_width_bits / bits_per_word;
else
pack_words = 1;
- word_len &= ~WORD_LEN_MSK;
- word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
true, true);
+ word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
writel(word_len, se->base + SE_SPI_WORD_LEN);
}
-static int setup_fifo_params(struct spi_device *spi_slv,
- struct spi_master *spi)
+static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
+ unsigned long clk_hz)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ u32 clk_sel, m_clk_cfg, idx, div;
struct geni_se *se = &mas->se;
- u32 loopback_cfg, cpol, cpha, demux_output_inv;
- u32 demux_sel, clk_sel, m_clk_cfg, idx, div;
int ret;
- loopback_cfg = readl(se->base + SE_SPI_LOOPBACK);
- cpol = readl(se->base + SE_SPI_CPOL);
- cpha = readl(se->base + SE_SPI_CPHA);
- demux_output_inv = 0;
- loopback_cfg &= ~LOOPBACK_MSK;
- cpol &= ~CPOL;
- cpha &= ~CPHA;
-
- if (spi_slv->mode & SPI_LOOP)
- loopback_cfg |= LOOPBACK_ENABLE;
-
- if (spi_slv->mode & SPI_CPOL)
- cpol |= CPOL;
-
- if (spi_slv->mode & SPI_CPHA)
- cpha |= CPHA;
-
- if (spi_slv->mode & SPI_CS_HIGH)
- demux_output_inv = BIT(spi_slv->chip_select);
-
- demux_sel = spi_slv->chip_select;
- mas->cur_speed_hz = spi_slv->max_speed_hz;
- mas->cur_bits_per_word = spi_slv->bits_per_word;
+ if (clk_hz == mas->cur_speed_hz)
+ return 0;
- ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
+ ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
if (ret) {
- dev_err(mas->dev, "Err setting clks ret(%d) for %ld\n",
- ret, mas->cur_speed_hz);
+ dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
return ret;
}
+ /*
+ * The SPI core clock gets configured with the requested
+ * frequency or the closest achievable frequency. For that
+ * reason the requested frequency is stored in cur_speed_hz
+ * and used for subsequent transfers instead of calling
+ * clk_get_rate().
+ */
+ mas->cur_speed_hz = clk_hz;
+
clk_sel = idx & CLK_SEL_MSK;
m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
- spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
- writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
- writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
- writel(cpha, se->base + SE_SPI_CPHA);
- writel(cpol, se->base + SE_SPI_CPOL);
- writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
writel(clk_sel, se->base + SE_GENI_CLK_SEL);
writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
+
+ /* Set BW quota for CPU as driver supports FIFO mode only. */
+ se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
+ ret = geni_icc_set_bw(se);
+ if (ret)
+ return ret;
+
return 0;
}
+static int setup_fifo_params(struct spi_device *spi_slv,
+ struct spi_master *spi)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
+ u32 demux_sel;
+
+ if (mas->last_mode != spi_slv->mode) {
+ if (spi_slv->mode & SPI_LOOP)
+ loopback_cfg = LOOPBACK_ENABLE;
+
+ if (spi_slv->mode & SPI_CPOL)
+ cpol = CPOL;
+
+ if (spi_slv->mode & SPI_CPHA)
+ cpha = CPHA;
+
+ if (spi_slv->mode & SPI_CS_HIGH)
+ demux_output_inv = BIT(spi_slv->chip_select);
+
+ demux_sel = spi_slv->chip_select;
+ mas->cur_bits_per_word = spi_slv->bits_per_word;
+
+ spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
+ writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
+ writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
+ writel(cpha, se->base + SE_SPI_CPHA);
+ writel(cpol, se->base + SE_SPI_CPOL);
+ writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
+
+ mas->last_mode = spi_slv->mode;
+ }
+
+ return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
+}
+
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
- struct geni_se *se = &mas->se;
- geni_se_select_mode(se, GENI_SE_FIFO);
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -295,6 +318,8 @@ static int spi_geni_init(struct spi_geni_master *mas)
else
mas->oversampling = 1;
+ geni_se_select_mode(se, GENI_SE_FIFO);
+
pm_runtime_put(mas->dev);
return 0;
}
@@ -306,6 +331,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
u32 m_cmd = 0;
u32 spi_tx_cfg, len;
struct geni_se *se = &mas->se;
+ int ret;
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
if (xfer->bits_per_word != mas->cur_bits_per_word) {
@@ -314,29 +340,9 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
}
/* Speed and bits per word can be overridden per transfer */
- if (xfer->speed_hz != mas->cur_speed_hz) {
- int ret;
- u32 clk_sel, m_clk_cfg;
- unsigned int idx, div;
-
- ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
- if (ret) {
- dev_err(mas->dev, "Err setting clks:%d\n", ret);
- return;
- }
- /*
- * SPI core clock gets configured with the requested frequency
- * or the frequency closer to the requested frequency.
- * For that reason requested frequency is stored in the
- * cur_speed_hz and referred in the consecutive transfer instead
- * of calling clk_get_rate() API.
- */
- mas->cur_speed_hz = xfer->speed_hz;
- clk_sel = idx & CLK_SEL_MSK;
- m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
- writel(clk_sel, se->base + SE_GENI_CLK_SEL);
- writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
- }
+ ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
+ if (ret)
+ return;
mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0;
@@ -561,6 +567,17 @@ static int spi_geni_probe(struct platform_device *pdev)
mas->se.wrapper = dev_get_drvdata(dev->parent);
mas->se.base = base;
mas->se.clk = clk;
+ mas->se.opp_table = dev_pm_opp_set_clkname(&pdev->dev, "se");
+ if (IS_ERR(mas->se.opp_table))
+ return PTR_ERR(mas->se.opp_table);
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ mas->se.has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
spi->bus_num = -1;
spi->dev.of_node = dev->of_node;
@@ -578,6 +595,17 @@ static int spi_geni_probe(struct platform_device *pdev)
spin_lock_init(&mas->lock);
pm_runtime_enable(dev);
+ ret = geni_icc_get(&mas->se, NULL);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+ /* Set the bus quota to a reasonable value for register access */
+ mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
+ mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ ret = geni_icc_set_bw(&mas->se);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+
ret = spi_geni_init(mas);
if (ret)
goto spi_geni_probe_runtime_disable;
@@ -596,6 +624,9 @@ spi_geni_probe_free_irq:
spi_geni_probe_runtime_disable:
pm_runtime_disable(dev);
spi_master_put(spi);
+ if (mas->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(mas->se.opp_table);
return ret;
}
@@ -609,6 +640,9 @@ static int spi_geni_remove(struct platform_device *pdev)
free_irq(mas->irq, spi);
pm_runtime_disable(&pdev->dev);
+ if (mas->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(mas->se.opp_table);
return 0;
}
@@ -616,16 +650,33 @@ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
+
+ ret = geni_se_resources_off(&mas->se);
+ if (ret)
+ return ret;
- return geni_se_resources_off(&mas->se);
+ return geni_icc_disable(&mas->se);
}
static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ ret = geni_icc_enable(&mas->se);
+ if (ret)
+ return ret;
+
+ ret = geni_se_resources_on(&mas->se);
+ if (ret)
+ return ret;
- return geni_se_resources_on(&mas->se);
+ return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}
static int __maybe_unused spi_geni_suspend(struct device *dev)
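
The probe/remove changes above follow a pattern repeated below in
spi-qcom-qspi and qcom_geni_serial: bind the OPP table to a named clock with
dev_pm_opp_set_clkname(), treat a missing device-tree table (-ENODEV) as
optional, and undo both on the error paths. A condensed sketch of just that
flow (the helper name and clock name are assumptions; the opp calls are the
ones used above):

    #include <linux/pm_opp.h>

    static int demo_opp_attach(struct device *dev, struct opp_table **table,
                               bool *has_table)
    {
            int ret;

            *table = dev_pm_opp_set_clkname(dev, "se");
            if (IS_ERR(*table))
                    return PTR_ERR(*table);

            ret = dev_pm_opp_of_add_table(dev);
            if (!ret) {
                    *has_table = true;      /* DT provided a table */
            } else if (ret != -ENODEV) {
                    dev_pm_opp_put_clkname(*table);
                    return ret;             /* table present but invalid */
            }
            /* -ENODEV: no table in DT, which is fine - it is optional. */
            return 0;
    }
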
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 3c4f83bf7084..b8857a97f40a 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -2,12 +2,14 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -139,7 +141,11 @@ struct qcom_qspi {
struct device *dev;
struct clk_bulk_data *clks;
struct qspi_xfer xfer;
- /* Lock to protect xfer and IRQ accessed registers */
+ struct icc_path *icc_path_cpu_to_qspi;
+ struct opp_table *opp_table;
+ bool has_opp_table;
+ unsigned long last_speed;
+ /* Lock to protect data accessed by IRQs */
spinlock_t lock;
};
@@ -221,6 +227,38 @@ static void qcom_qspi_handle_err(struct spi_master *master,
spin_unlock_irqrestore(&ctrl->lock, flags);
}
+static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
+{
+ int ret;
+ unsigned int avg_bw_cpu;
+
+ if (speed_hz == ctrl->last_speed)
+ return 0;
+
+ /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
+ ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Set the BW quota for the CPU as the driver supports FIFO mode only.
+ * We don't have an explicit peak requirement, so keep it equal to avg_bw.
+ */
+ avg_bw_cpu = Bps_to_icc(speed_hz);
+ ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ctrl->last_speed = speed_hz;
+
+ return 0;
+}
+
static int qcom_qspi_transfer_one(struct spi_master *master,
struct spi_device *slv,
struct spi_transfer *xfer)
@@ -234,12 +272,9 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
if (xfer->speed_hz)
speed_hz = xfer->speed_hz;
- /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
- ret = clk_set_rate(ctrl->clks[QSPI_CLK_CORE].clk, speed_hz * 4);
- if (ret) {
- dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
+ ret = qcom_qspi_set_speed(ctrl, speed_hz);
+ if (ret)
return ret;
- }
spin_lock_irqsave(&ctrl->lock, flags);
@@ -458,6 +493,29 @@ static int qcom_qspi_probe(struct platform_device *pdev)
if (ret)
goto exit_probe_master_put;
+ ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
+ if (IS_ERR(ctrl->icc_path_cpu_to_qspi)) {
+ ret = PTR_ERR(ctrl->icc_path_cpu_to_qspi);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get cpu path: %d\n", ret);
+ goto exit_probe_master_put;
+ }
+ /* Set BW vote for register access */
+ ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
+ Bps_to_icc(1000));
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
+ __func__, ret);
+ goto exit_probe_master_put;
+ }
+
+ ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
+ __func__, ret);
+ goto exit_probe_master_put;
+ }
+
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto exit_probe_master_put;
@@ -481,6 +539,22 @@ static int qcom_qspi_probe(struct platform_device *pdev)
master->handle_err = qcom_qspi_handle_err;
master->auto_runtime_pm = true;
+ ctrl->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
+ if (IS_ERR(ctrl->opp_table)) {
+ ret = PTR_ERR(ctrl->opp_table);
+ goto exit_probe_master_put;
+ }
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ ctrl->has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ goto exit_probe_master_put;
+ }
+
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 250);
pm_runtime_enable(dev);
ret = spi_register_master(master);
@@ -488,6 +562,9 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return 0;
pm_runtime_disable(dev);
+ if (ctrl->has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(ctrl->opp_table);
exit_probe_master_put:
spi_master_put(master);
@@ -498,11 +575,15 @@ exit_probe_master_put:
static int qcom_qspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
spi_unregister_master(master);
pm_runtime_disable(&pdev->dev);
+ if (ctrl->has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(ctrl->opp_table);
return 0;
}
@@ -511,9 +592,19 @@ static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);
+ ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
return 0;
}
@@ -521,8 +612,20 @@ static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+
+ ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+ if (ret)
+ return ret;
- return clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+ return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}
static int __maybe_unused qcom_qspi_suspend(struct device *dev)
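
Both GENI-based SPI controllers now order their runtime-PM callbacks the
same way: suspend drops the OPP performance vote first, gates the clocks,
and releases the interconnect vote last; resume mirrors that in reverse.
A sketch of just that ordering (the demo_* helpers are hypothetical; the
icc_*/dev_pm_opp_* calls are the ones used above):

    #include <linux/interconnect.h>
    #include <linux/pm_opp.h>

    static int demo_runtime_suspend(struct device *dev, struct icc_path *path)
    {
            dev_pm_opp_set_rate(dev, 0);    /* drop the performance vote */
            /* ...gate the bus clocks here... */
            return icc_disable(path);       /* then drop the bandwidth vote */
    }

    static int demo_runtime_resume(struct device *dev, struct icc_path *path,
                                   unsigned long last_hz)
    {
            int ret;

            ret = icc_enable(path);         /* restore bandwidth first */
            if (ret)
                    return ret;
            /* ...ungate the bus clocks here... */
            return dev_pm_opp_set_rate(dev, last_hz); /* re-apply the rate */
    }
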
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 99698b8a3a74..b373b1b08b6d 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -17,6 +17,7 @@
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "shm_pool.h"
@@ -218,6 +219,11 @@ static void optee_get_version(struct tee_device *teedev,
*vers = v;
}
+static void optee_bus_scan(struct work_struct *work)
+{
+ WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
+}
+
static int optee_open(struct tee_context *ctx)
{
struct optee_context_data *ctxdata;
@@ -241,8 +247,18 @@ static int optee_open(struct tee_context *ctx)
kfree(ctxdata);
return -EBUSY;
}
- }
+ if (!optee->scan_bus_done) {
+ INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
+ optee->scan_bus_wq = create_workqueue("optee_bus_scan");
+ if (!optee->scan_bus_wq) {
+ kfree(ctxdata);
+ return -ECHILD;
+ }
+ queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
+ optee->scan_bus_done = true;
+ }
+ }
mutex_init(&ctxdata->mutex);
INIT_LIST_HEAD(&ctxdata->sess_list);
@@ -296,8 +312,13 @@ static void optee_release(struct tee_context *ctx)
ctx->data = NULL;
- if (teedev == optee->supp_teedev)
+ if (teedev == optee->supp_teedev) {
+ if (optee->scan_bus_wq) {
+ destroy_workqueue(optee->scan_bus_wq);
+ optee->scan_bus_wq = NULL;
+ }
optee_supp_release(&optee->supp);
+ }
}
static const struct tee_driver_ops optee_ops = {
@@ -675,7 +696,7 @@ static int optee_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, optee);
- rc = optee_enumerate_devices();
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc) {
optee_remove(pdev);
return rc;
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index e3a148521ec1..7a897d51969f 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -11,18 +11,6 @@
#include <linux/uuid.h>
#include "optee_private.h"
-/*
- * Get device UUIDs
- *
- * [out] memref[0] Array of device UUIDs
- *
- * Return codes:
- * TEE_SUCCESS - Invoke command success
- * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
- * TEE_ERROR_SHORT_BUFFER - Output buffer size less than required
- */
-#define PTA_CMD_GET_DEVICES 0x0
-
static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
{
if (ver->impl_id == TEE_IMPL_ID_OPTEE)
@@ -32,7 +20,8 @@ static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
}
static int get_devices(struct tee_context *ctx, u32 session,
- struct tee_shm *device_shm, u32 *shm_size)
+ struct tee_shm *device_shm, u32 *shm_size,
+ u32 func)
{
int ret = 0;
struct tee_ioctl_invoke_arg inv_arg;
@@ -41,8 +30,7 @@ static int get_devices(struct tee_context *ctx, u32 session,
memset(&inv_arg, 0, sizeof(inv_arg));
memset(&param, 0, sizeof(param));
- /* Invoke PTA_CMD_GET_DEVICES function */
- inv_arg.func = PTA_CMD_GET_DEVICES;
+ inv_arg.func = func;
inv_arg.session = session;
inv_arg.num_params = 4;
@@ -65,7 +53,7 @@ static int get_devices(struct tee_context *ctx, u32 session,
return 0;
}
-static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
+static int optee_register_device(const uuid_t *device_uuid)
{
struct tee_client_device *optee_device = NULL;
int rc;
@@ -75,7 +63,10 @@ static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
return -ENOMEM;
optee_device->dev.bus = &tee_bus_type;
- dev_set_name(&optee_device->dev, "optee-clnt%u", device_id);
+ if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) {
+ kfree(optee_device);
+ return -ENOMEM;
+ }
uuid_copy(&optee_device->id.uuid, device_uuid);
rc = device_register(&optee_device->dev);
@@ -87,7 +78,7 @@ static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
return rc;
}
-int optee_enumerate_devices(void)
+static int __optee_enumerate_devices(u32 func)
{
const uuid_t pta_uuid =
UUID_INIT(0x7011a688, 0xddde, 0x4053,
@@ -118,7 +109,7 @@ int optee_enumerate_devices(void)
goto out_ctx;
}
- rc = get_devices(ctx, sess_arg.session, NULL, &shm_size);
+ rc = get_devices(ctx, sess_arg.session, NULL, &shm_size, func);
if (rc < 0 || !shm_size)
goto out_sess;
@@ -130,7 +121,7 @@ int optee_enumerate_devices(void)
goto out_sess;
}
- rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size);
+ rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size, func);
if (rc < 0)
goto out_shm;
@@ -144,7 +135,7 @@ int optee_enumerate_devices(void)
num_devices = shm_size / sizeof(uuid_t);
for (idx = 0; idx < num_devices; idx++) {
- rc = optee_register_device(&device_uuid[idx], idx);
+ rc = optee_register_device(&device_uuid[idx]);
if (rc)
goto out_shm;
}
@@ -158,3 +149,8 @@ out_ctx:
return rc;
}
+
+int optee_enumerate_devices(u32 func)
+{
+ return __optee_enumerate_devices(func);
+}
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index d9c5037b4e03..8b71839a357e 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -78,6 +78,9 @@ struct optee_supp {
* @memremaped_shm virtual address of memory in shared memory pool
* @sec_caps: secure world capabilities defined by
* OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ * @scan_bus_done flag if device registration was already done.
+ * @scan_bus_wq workqueue to scan optee bus and register optee drivers
+ * @scan_bus_work work struct to scan optee bus and register optee drivers
*/
struct optee {
struct tee_device *supp_teedev;
@@ -89,6 +92,9 @@ struct optee {
struct tee_shm_pool *pool;
void *memremaped_shm;
u32 sec_caps;
+ bool scan_bus_done;
+ struct workqueue_struct *scan_bus_wq;
+ struct work_struct scan_bus_work;
};
struct optee_session {
@@ -173,7 +179,9 @@ void optee_free_pages_list(void *array, size_t num_entries);
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
size_t page_offset);
-int optee_enumerate_devices(void);
+#define PTA_CMD_GET_DEVICES 0x0
+#define PTA_CMD_GET_DEVICES_SUPP 0x1
+int optee_enumerate_devices(u32 func);
/*
* Small helpers
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 457c0bf8cbf8..07b7b6b05b8b 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -102,11 +103,19 @@
#define DEFAULT_IO_MACRO_IO2_IO3_MASK GENMASK(15, 4)
#define IO_MACRO_IO2_IO3_SWAP 0x4640
-#ifdef CONFIG_CONSOLE_POLL
-#define CONSOLE_RX_BYTES_PW 1
-#else
-#define CONSOLE_RX_BYTES_PW 4
-#endif
+/* We always configure 4 bytes per FIFO word */
+#define BYTES_PER_FIFO_WORD 4
+
+struct qcom_geni_private_data {
+ /* NOTE: earlycon port will have NULL here */
+ struct uart_driver *drv;
+
+ u32 poll_cached_bytes;
+ unsigned int poll_cached_bytes_cnt;
+
+ u32 write_cached_bytes;
+ unsigned int write_cached_bytes_cnt;
+};
struct qcom_geni_serial_port {
struct uart_port uport;
@@ -118,8 +127,6 @@ struct qcom_geni_serial_port {
bool setup;
int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
unsigned int baud;
- unsigned int tx_bytes_pw;
- unsigned int rx_bytes_pw;
void *rx_fifo;
u32 loopback;
bool brk;
@@ -128,6 +135,8 @@ struct qcom_geni_serial_port {
int wakeup_irq;
bool rx_tx_swap;
bool cts_rts_swap;
+
+ struct qcom_geni_private_data private_data;
};
static const struct uart_ops qcom_geni_console_pops;
@@ -263,8 +272,9 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
unsigned int baud;
unsigned int fifo_bits;
unsigned long timeout_us = 20000;
+ struct qcom_geni_private_data *private_data = uport->private_data;
- if (uport->private_data) {
+ if (private_data->drv) {
port = to_dev_port(uport, uport);
baud = port->baud;
if (!baud)
@@ -330,23 +340,42 @@ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
}
#ifdef CONFIG_CONSOLE_POLL
+
static int qcom_geni_serial_get_char(struct uart_port *uport)
{
- u32 rx_fifo;
+ struct qcom_geni_private_data *private_data = uport->private_data;
u32 status;
+ u32 word_cnt;
+ int ret;
+
+ if (!private_data->poll_cached_bytes_cnt) {
+ status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
+ writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+
+ status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
+ writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ word_cnt = status & RX_FIFO_WC_MSK;
+ if (!word_cnt)
+ return NO_POLL_CHAR;
- status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
- writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ if (word_cnt == 1 && (status & RX_LAST))
+ private_data->poll_cached_bytes_cnt =
+ (status & RX_LAST_BYTE_VALID_MSK) >>
+ RX_LAST_BYTE_VALID_SHFT;
+ else
+ private_data->poll_cached_bytes_cnt = 4;
- status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
- writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ private_data->poll_cached_bytes =
+ readl(uport->membase + SE_GENI_RX_FIFOn);
+ }
- status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
- if (!(status & RX_FIFO_WC_MSK))
- return NO_POLL_CHAR;
+ private_data->poll_cached_bytes_cnt--;
+ ret = private_data->poll_cached_bytes & 0xff;
+ private_data->poll_cached_bytes >>= 8;
- rx_fifo = readl(uport->membase + SE_GENI_RX_FIFOn);
- return rx_fifo & 0xff;
+ return ret;
}
static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
@@ -365,13 +394,25 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
{
- writel(ch, uport->membase + SE_GENI_TX_FIFOn);
+ struct qcom_geni_private_data *private_data = uport->private_data;
+
+ private_data->write_cached_bytes =
+ (private_data->write_cached_bytes >> 8) | (ch << 24);
+ private_data->write_cached_bytes_cnt++;
+
+ if (private_data->write_cached_bytes_cnt == BYTES_PER_FIFO_WORD) {
+ writel(private_data->write_cached_bytes,
+ uport->membase + SE_GENI_TX_FIFOn);
+ private_data->write_cached_bytes_cnt = 0;
+ }
}
static void
__qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
unsigned int count)
{
+ struct qcom_geni_private_data *private_data = uport->private_data;
+
int i;
u32 bytes_to_send = count;
@@ -406,6 +447,15 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
SE_GENI_M_IRQ_CLEAR);
i += chars_to_write;
}
+
+ if (private_data->write_cached_bytes_cnt) {
+ private_data->write_cached_bytes >>= BITS_PER_BYTE *
+ (BYTES_PER_FIFO_WORD - private_data->write_cached_bytes_cnt);
+ writel(private_data->write_cached_bytes,
+ uport->membase + SE_GENI_TX_FIFOn);
+ private_data->write_cached_bytes_cnt = 0;
+ }
+
qcom_geni_serial_poll_tx_done(uport);
}
@@ -478,7 +528,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
tport = &uport->state->port;
for (i = 0; i < bytes; ) {
int c;
- int chunk = min_t(int, bytes - i, port->rx_bytes_pw);
+ int chunk = min_t(int, bytes - i, BYTES_PER_FIFO_WORD);
ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, buf, 1);
i += chunk;
@@ -658,11 +708,11 @@ static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
if (!word_cnt)
return;
- total_bytes = port->rx_bytes_pw * (word_cnt - 1);
+ total_bytes = BYTES_PER_FIFO_WORD * (word_cnt - 1);
if (last_word_partial && last_word_byte_cnt)
total_bytes += last_word_byte_cnt;
else
- total_bytes += port->rx_bytes_pw;
+ total_bytes += BYTES_PER_FIFO_WORD;
port->handle_rx(uport, total_bytes, drop);
}
@@ -695,7 +745,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
}
avail = port->tx_fifo_depth - (status & TX_FIFO_WC);
- avail *= port->tx_bytes_pw;
+ avail *= BYTES_PER_FIFO_WORD;
tail = xmit->tail;
chunk = min(avail, pending);
@@ -719,7 +769,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
int c;
memset(buf, 0, ARRAY_SIZE(buf));
- tx_bytes = min_t(size_t, remaining, port->tx_bytes_pw);
+ tx_bytes = min_t(size_t, remaining, BYTES_PER_FIFO_WORD);
for (c = 0; c < tx_bytes ; c++) {
buf[c] = xmit->buf[tail++];
@@ -836,14 +886,6 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
u32 proto;
u32 pin_swap;
- if (uart_console(uport)) {
- port->tx_bytes_pw = 1;
- port->rx_bytes_pw = CONSOLE_RX_BYTES_PW;
- } else {
- port->tx_bytes_pw = 4;
- port->rx_bytes_pw = 4;
- }
-
proto = geni_se_read_proto(&port->se);
if (proto != GENI_SE_UART) {
dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
@@ -875,10 +917,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
*/
if (uart_console(uport))
qcom_geni_serial_poll_tx_done(uport);
- geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
- false, true, false);
- geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
- false, false, true);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD,
+ false, true, true);
geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
geni_se_select_mode(&port->se, GENI_SE_FIFO);
port->setup = true;
@@ -945,6 +985,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
unsigned long clk_rate;
u32 ver, sampling_rate;
+ unsigned int avg_bw_core;
qcom_geni_serial_stop_rx(uport);
/* baud rate */
@@ -962,10 +1003,20 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
goto out_restart_rx;
uport->uartclk = clk_rate;
- clk_set_rate(port->se.clk, clk_rate);
+ dev_pm_opp_set_rate(uport->dev, clk_rate);
ser_clk_cfg = SER_CLK_EN;
ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+ /*
+ * Bump up BW vote on CPU and CORE path as driver supports FIFO mode
+ * only.
+ */
+ avg_bw_core = (baud > 115200) ? Bps_to_icc(CORE_2X_50_MHZ)
+ : GENI_DEFAULT_BW;
+ port->se.icc_paths[GENI_TO_CORE].avg_bw = avg_bw_core;
+ port->se.icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(baud);
+ geni_icc_set_bw(&port->se);
+
/* parity */
tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG);
tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG);
@@ -1121,6 +1172,14 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
struct console *con) { }
#endif
+static int qcom_geni_serial_earlycon_exit(struct console *con)
+{
+ geni_remove_earlycon_icc_vote();
+ return 0;
+}
+
+static struct qcom_geni_private_data earlycon_private_data;
+
static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
const char *opt)
{
@@ -1136,6 +1195,8 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
if (!uport->membase)
return -EINVAL;
+ uport->private_data = &earlycon_private_data;
+
memset(&se, 0, sizeof(se));
se.base = uport->membase;
if (geni_se_read_proto(&se) != GENI_SE_UART)
@@ -1153,7 +1214,8 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
*/
qcom_geni_serial_poll_tx_done(uport);
qcom_geni_serial_abort_rx(uport);
- geni_se_config_packing(&se, BITS_PER_BYTE, 1, false, true, false);
+ geni_se_config_packing(&se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD,
+ false, true, true);
geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
geni_se_select_mode(&se, GENI_SE_FIFO);
@@ -1166,6 +1228,7 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
dev->con->write = qcom_geni_serial_earlycon_write;
+ dev->con->exit = qcom_geni_serial_earlycon_exit;
dev->con->setup = NULL;
qcom_geni_serial_enable_early_read(&se, dev->con);
@@ -1228,11 +1291,14 @@ static void qcom_geni_serial_pm(struct uart_port *uport,
if (old_state == UART_PM_STATE_UNDEFINED)
old_state = UART_PM_STATE_OFF;
- if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF)
+ if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) {
+ geni_icc_enable(&port->se);
geni_se_resources_on(&port->se);
- else if (new_state == UART_PM_STATE_OFF &&
- old_state == UART_PM_STATE_ON)
+ } else if (new_state == UART_PM_STATE_OFF &&
+ old_state == UART_PM_STATE_ON) {
geni_se_resources_off(&port->se);
+ geni_icc_disable(&port->se);
+ }
}
static const struct uart_ops qcom_geni_console_pops = {
@@ -1330,6 +1396,17 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ ret = geni_icc_get(&port->se, NULL);
+ if (ret)
+ return ret;
+ port->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
+ port->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ /* Set BW for register access */
+ ret = geni_icc_set_bw(&port->se);
+ if (ret)
+ return ret;
+
port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
"qcom_geni_serial_%s%d",
uart_console(uport) ? "console" : "uart", uport->line);
@@ -1351,13 +1428,26 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
if (of_property_read_bool(pdev->dev.of_node, "cts-rts-swap"))
port->cts_rts_swap = true;
- uport->private_data = drv;
+ port->se.opp_table = dev_pm_opp_set_clkname(&pdev->dev, "se");
+ if (IS_ERR(port->se.opp_table))
+ return PTR_ERR(port->se.opp_table);
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ port->se.has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
+
+ port->private_data.drv = drv;
+ uport->private_data = &port->private_data;
platform_set_drvdata(pdev, port);
port->handle_rx = console ? handle_rx_console : handle_rx_uart;
ret = uart_add_one_port(drv, uport);
if (ret)
- return ret;
+ goto err;
irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
@@ -1365,7 +1455,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
if (ret) {
dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
uart_remove_one_port(drv, uport);
- return ret;
+ goto err;
}
/*
@@ -1382,18 +1472,26 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
if (ret) {
device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, uport);
- return ret;
+ goto err;
}
}
return 0;
+err:
+ if (port->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(port->se.opp_table);
+ return ret;
}
static int qcom_geni_serial_remove(struct platform_device *pdev)
{
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
- struct uart_driver *drv = port->uport.private_data;
+ struct uart_driver *drv = port->private_data.drv;
+ if (port->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(port->se.opp_table);
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, &port->uport);
@@ -1405,16 +1503,32 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
{
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
+ struct qcom_geni_private_data *private_data = uport->private_data;
- return uart_suspend_port(uport->private_data, uport);
+ /*
+ * This is done so we can hit the lowest possible state in suspend
+ * even with no_console_suspend.
+ */
+ if (uart_console(uport)) {
+ geni_icc_set_tag(&port->se, 0x3);
+ geni_icc_set_bw(&port->se);
+ }
+ return uart_suspend_port(private_data->drv, uport);
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
{
+ int ret;
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
+ struct qcom_geni_private_data *private_data = uport->private_data;
- return uart_resume_port(uport->private_data, uport);
+ ret = uart_resume_port(private_data->drv, uport);
+ if (uart_console(uport)) {
+ geni_icc_set_tag(&port->se, 0x7);
+ geni_icc_set_bw(&port->se);
+ }
+ return ret;
}
static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
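
The serial driver above replaces per-byte FIFO accesses with packed 32-bit
words: qcom_geni_serial_wr_char() shifts each new byte in from the top and
flushes on the fourth, which is why the console write path must right-shift
a partial word before the final writel(). A standalone sketch of that
packing arithmetic (plain C with a fake FIFO, all names hypothetical) shows
the resulting byte order:

    #include <stdint.h>
    #include <stdio.h>

    /* TX side: bytes accumulate from the top, flushed as one FIFO word. */
    static uint32_t tx_word;
    static unsigned int tx_cnt;

    static void demo_wr_char(int ch, void (*write_fifo)(uint32_t))
    {
            tx_word = (tx_word >> 8) | ((uint32_t)ch << 24);
            if (++tx_cnt == 4) {            /* BYTES_PER_FIFO_WORD */
                    write_fifo(tx_word);
                    tx_cnt = 0;
            }
    }

    static void fake_fifo(uint32_t w)
    {
            printf("FIFO word: 0x%08x\n", w);
    }

    int main(void)
    {
            /* 'a'..'d' pack little-endian: prints 0x64636261 */
            demo_wr_char('a', fake_fifo);
            demo_wr_char('b', fake_fifo);
            demo_wr_char('c', fake_fifo);
            demo_wr_char('d', fake_fifo);
            return 0;
    }
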
diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h
index 6d696d2d1508..eacc0f18083e 100644
--- a/include/dt-bindings/reset/ti-syscon.h
+++ b/include/dt-bindings/reset/ti-syscon.h
@@ -2,7 +2,7 @@
/*
* TI Syscon Reset definitions
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index efcbde731f03..15c706fb0a37 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -71,6 +71,11 @@
ARM_SMCCC_SMC_32, \
0, 1)
+#define ARM_SMCCC_ARCH_SOC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 2)
+
#define ARM_SMCCC_ARCH_WORKAROUND_1 \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 3fa418a4ca67..22c76571a294 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -14,9 +14,11 @@
#include <linux/firmware/imx/svc/misc.h>
#include <linux/firmware/imx/svc/pm.h>
+#include <linux/firmware/imx/svc/rm.h>
int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_soc_init(struct device *dev);
#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
new file mode 100644
index 000000000000..456b6a59d29b
--- /dev/null
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Resource Management (RM) function. This includes functions for
+ * partitioning resources, pads, and memory regions.
+ *
+ * RM_SVC (SVC) Resource Management Service
+ *
+ * Module for the Resource Management (RM) service.
+ */
+
+#ifndef _SC_RM_API_H
+#define _SC_RM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC RM function calls.
+ */
+enum imx_sc_rm_func {
+ IMX_SC_RM_FUNC_UNKNOWN = 0,
+ IMX_SC_RM_FUNC_PARTITION_ALLOC = 1,
+ IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31,
+ IMX_SC_RM_FUNC_PARTITION_FREE = 2,
+ IMX_SC_RM_FUNC_GET_DID = 26,
+ IMX_SC_RM_FUNC_PARTITION_STATIC = 3,
+ IMX_SC_RM_FUNC_PARTITION_LOCK = 4,
+ IMX_SC_RM_FUNC_GET_PARTITION = 5,
+ IMX_SC_RM_FUNC_SET_PARENT = 6,
+ IMX_SC_RM_FUNC_MOVE_ALL = 7,
+ IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8,
+ IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9,
+ IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28,
+ IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10,
+ IMX_SC_RM_FUNC_SET_MASTER_SID = 11,
+ IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12,
+ IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13,
+ IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33,
+ IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14,
+ IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15,
+ IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16,
+ IMX_SC_RM_FUNC_MEMREG_ALLOC = 17,
+ IMX_SC_RM_FUNC_MEMREG_SPLIT = 29,
+ IMX_SC_RM_FUNC_MEMREG_FRAG = 32,
+ IMX_SC_RM_FUNC_MEMREG_FREE = 18,
+ IMX_SC_RM_FUNC_FIND_MEMREG = 30,
+ IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19,
+ IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20,
+ IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21,
+ IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22,
+ IMX_SC_RM_FUNC_ASSIGN_PAD = 23,
+ IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24,
+ IMX_SC_RM_FUNC_IS_PAD_OWNED = 25,
+ IMX_SC_RM_FUNC_DUMP = 27,
+};
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+#else
+static inline bool
+imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ return true;
+}
+#endif
+#endif
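A hedged sketch of a consumer of the new RM call; IMX_SC_R_UART_0 is an illustrative resource ID from the dt-bindings headers, and error handling is abbreviated.

#include <linux/firmware/imx/sci.h>
#include <dt-bindings/firmware/imx/rsrc.h>

static int check_uart_owned(void)
{
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	/* Skip resources this partition does not own */
	if (!imx_sc_rm_is_resource_owned(ipc, IMX_SC_R_UART_0))
		return -ENODEV;

	return 0;
}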
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index a4dc45fbec0a..a96e8c252bac 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -17,6 +17,7 @@
#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_UPDATE_VALUE BIT(16)
#define CMDQ_WFE_WAIT BIT(15)
#define CMDQ_WFE_WAIT_VALUE 0x1
@@ -59,6 +60,7 @@ enum cmdq_code {
CMDQ_CODE_JUMP = 0x10,
CMDQ_CODE_WFE = 0x20,
CMDQ_CODE_EOC = 0x40,
+ CMDQ_CODE_LOGIC = 0xa0,
};
enum cmdq_cb_status {
diff --git a/include/linux/of.h b/include/linux/of.h
index 60abe3f636ad..5cf7ae0465d1 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -630,6 +630,11 @@ static inline struct device_node *of_get_parent(const struct device_node *node)
return NULL;
}
+static inline struct device_node *of_get_next_parent(struct device_node *node)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_next_child(
const struct device_node *node, struct device_node *prev)
{
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index dd464943f717..8f385fbe5a0e 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_QCOM_GENI_SE
#define _LINUX_QCOM_GENI_SE
+#include <linux/interconnect.h>
+
/* Transfer mode supported by GENI Serial Engines */
enum geni_se_xfer_mode {
GENI_SE_INVALID,
@@ -25,6 +27,17 @@ enum geni_se_protocol_type {
struct geni_wrapper;
struct clk;
+enum geni_icc_path_index {
+ GENI_TO_CORE,
+ CPU_TO_GENI,
+ GENI_TO_DDR
+};
+
+struct geni_icc_path {
+ struct icc_path *path;
+ unsigned int avg_bw;
+};
+
/**
* struct geni_se - GENI Serial Engine
* @base: Base Address of the Serial Engine's register block
@@ -33,6 +46,9 @@ struct clk;
* @clk: Handle to the core serial engine clock
* @num_clk_levels: Number of valid clock levels in clk_perf_tbl
* @clk_perf_tbl: Table of clock frequency input to serial engine clock
+ * @icc_paths: Array of ICC paths for SE
+ * @opp_table: Pointer to the OPP table
+ * @has_opp_table: Specifies if the SE has an OPP table
*/
struct geni_se {
void __iomem *base;
@@ -41,6 +57,9 @@ struct geni_se {
struct clk *clk;
unsigned int num_clk_levels;
unsigned long *clk_perf_tbl;
+ struct geni_icc_path icc_paths[3];
+ struct opp_table *opp_table;
+ bool has_opp_table;
};
/* Common SE registers */
@@ -229,6 +248,21 @@ struct geni_se {
#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
+/*
+ * Define bandwidth thresholds that cause the underlying Core 2X interconnect
+ * clock to run at the named frequency. These baseline values are recommended
+ * by the hardware team, and are not dynamically scaled with GENI bandwidth
+ * beyond basic on/off.
+ */
+#define CORE_2X_19_2_MHZ 960
+#define CORE_2X_50_MHZ 2500
+#define CORE_2X_100_MHZ 5000
+#define CORE_2X_150_MHZ 7500
+#define CORE_2X_200_MHZ 10000
+#define CORE_2X_236_MHZ 16383
+
+#define GENI_DEFAULT_BW Bps_to_icc(1000)
+
#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
u32 geni_se_get_qup_hw_version(struct geni_se *se);
@@ -416,5 +450,16 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
+
+int geni_icc_get(struct geni_se *se, const char *icc_ddr);
+
+int geni_icc_set_bw(struct geni_se *se);
+void geni_icc_set_tag(struct geni_se *se, u32 tag);
+
+int geni_icc_enable(struct geni_se *se);
+
+int geni_icc_disable(struct geni_se *se);
+
+void geni_remove_earlycon_icc_vote(void);
#endif
#endif
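A sketch of the intended flow for the new geni_icc_* helpers, assuming a driver with a populated struct geni_se; the DDR path name ("qup-memory") is an assumption borrowed from GENI client usage and may differ per consumer.

static int my_se_icc_init(struct geni_se *se)
{
	int ret;

	ret = geni_icc_get(se, "qup-memory");
	if (ret)
		return ret;

	se->icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
	se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

	ret = geni_icc_set_bw(se);	/* program the bandwidth votes */
	if (ret)
		return ret;

	return geni_icc_enable(se);	/* drop again with geni_icc_disable() */
}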
diff --git a/drivers/reset/reset-simple.h b/include/linux/reset/reset-simple.h
index 08ccb25a55e6..c3e44f45b0f7 100644
--- a/drivers/reset/reset-simple.h
+++ b/include/linux/reset/reset-simple.h
@@ -27,6 +27,12 @@
* @status_active_low: if true, bits read back as cleared while the reset is
* asserted. Otherwise, bits read back as set while the
* reset is asserted.
+ * @reset_us: Minimum delay in microseconds that needs to elapse
+ * between an assert and a deassert to reset the
+ * device. If multiple consumers with different delay
+ * requirements are connected to this controller, it must
+ * be the largest minimum delay. 0 means that such a delay is
+ * unknown and the reset operation is unsupported.
*/
struct reset_simple_data {
spinlock_t lock;
@@ -34,6 +40,7 @@ struct reset_simple_data {
struct reset_controller_dev rcdev;
bool active_low;
bool status_active_low;
+ unsigned int reset_us;
};
extern const struct reset_control_ops reset_simple_ops;
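A hedged sketch of how a ->reset() implementation can honor the new reset_us field; the actual reset-simple code may differ in detail.

static int sketch_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct reset_simple_data *data =
		container_of(rcdev, struct reset_simple_data, rcdev);
	int ret;

	if (!data->reset_us)
		return -ENOTSUPP;	/* no known safe assert/deassert delay */

	ret = reset_simple_ops.assert(rcdev, id);
	if (ret)
		return ret;

	usleep_range(data->reset_us, data->reset_us * 2);

	return reset_simple_ops.deassert(rcdev, id);
}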
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index ce2f5c28b2df..7e5dd7d1e221 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -9,6 +9,7 @@
#define _LINUX_SCMI_PROTOCOL_H
#include <linux/device.h>
+#include <linux/notifier.h>
#include <linux/types.h>
#define SCMI_MAX_STR_SIZE 16
@@ -118,6 +119,8 @@ struct scmi_perf_ops {
unsigned long *rate, bool poll);
int (*est_power_get)(const struct scmi_handle *handle, u32 domain,
unsigned long *rate, unsigned long *power);
+ bool (*fast_switch_possible)(const struct scmi_handle *handle,
+ struct device *dev);
};
/**
@@ -173,18 +176,13 @@ enum scmi_sensor_class {
*
* @count_get: get the count of sensors provided by SCMI
* @info_get: get the information of the specified sensor
- * @trip_point_notify: control notifications on cross-over events for
- * the trip-points
* @trip_point_config: selects and configures a trip-point of interest
* @reading_get: gets the current value of the sensor
*/
struct scmi_sensor_ops {
int (*count_get)(const struct scmi_handle *handle);
-
const struct scmi_sensor_info *(*info_get)
(const struct scmi_handle *handle, u32 sensor_id);
- int (*trip_point_notify)(const struct scmi_handle *handle,
- u32 sensor_id, bool enable);
int (*trip_point_config)(const struct scmi_handle *handle,
u32 sensor_id, u8 trip_id, u64 trip_value);
int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id,
@@ -212,6 +210,49 @@ struct scmi_reset_ops {
};
/**
+ * struct scmi_notify_ops - represents notifications' operations provided by
+ * SCMI core
+ * @register_event_notifier: Register a notifier_block for the requested event
+ * @unregister_event_notifier: Unregister a notifier_block for the requested
+ * event
+ *
+ * A user can register/unregister its own notifier_block against the wanted
+ * platform instance regarding the desired event identified by the
+ * tuple: (proto_id, evt_id, src_id) using the provided register/unregister
+ * interface where:
+ *
+ * @handle: The handle identifying the platform instance to use
+ * @proto_id: The protocol ID as in SCMI Specification
+ * @evt_id: The message ID of the desired event as in SCMI Specification
+ * @src_id: A pointer to the desired source ID if different sources are
+ * possible for the protocol (such as domain_id, sensor_id, and so on)
+ *
+ * @src_id can be provided as NULL if it simply does NOT make sense for
+ * the protocol at hand, OR if the user is explicitly interested in
+ * receiving notifications from ANY existing source associated with the
+ * specified proto_id / evt_id.
+ *
+ * Received notifications are finally delivered to the registered users,
+ * invoking the callback provided with the notifier_block *nb as follows:
+ *
+ * int user_cb(nb, evt_id, report)
+ *
+ * with:
+ *
+ * @nb: The notifier block provided by the user
+ * @evt_id: The message ID of the delivered event
+ * @report: A custom struct describing the specific event delivered
+ */
+struct scmi_notify_ops {
+ int (*register_event_notifier)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb);
+ int (*unregister_event_notifier)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb);
+};
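To make the new calling convention concrete, here is a minimal sketch (not part of the patch) of a client registering for performance-level notifications; SCMI_PROTOCOL_PERF comes from enum scmi_std_protocol and the domain ID is an arbitrary example.

#include <linux/notifier.h>
#include <linux/scmi_protocol.h>

static int perf_level_cb(struct notifier_block *nb,
			 unsigned long evt_id, void *data)
{
	struct scmi_perf_level_report *r = data;

	pr_info("domain %u now at perf level %u\n",
		r->domain_id, r->performance_level);
	return NOTIFY_OK;
}

static struct notifier_block perf_nb = { .notifier_call = perf_level_cb };

static int register_perf_notifier(const struct scmi_handle *handle)
{
	u32 domain = 0;	/* example source ID */

	return handle->notify_ops->register_event_notifier(handle,
			SCMI_PROTOCOL_PERF,
			SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
			&domain, &perf_nb);
}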
+
+/**
* struct scmi_handle - Handle returned to ARM SCMI clients for usage.
*
* @dev: pointer to the SCMI device
@@ -221,6 +262,7 @@ struct scmi_reset_ops {
* @clk_ops: pointer to set of clock protocol operations
* @sensor_ops: pointer to set of sensor protocol operations
* @reset_ops: pointer to set of reset protocol operations
+ * @notify_ops: pointer to set of notifications related operations
* @perf_priv: pointer to private data structure specific to performance
* protocol(for internal use only)
* @clk_priv: pointer to private data structure specific to clock
@@ -231,6 +273,8 @@ struct scmi_reset_ops {
* protocol(for internal use only)
* @reset_priv: pointer to private data structure specific to reset
* protocol(for internal use only)
+ * @notify_priv: pointer to private data structure specific to notifications
+ * (for internal use only)
*/
struct scmi_handle {
struct device *dev;
@@ -240,12 +284,14 @@ struct scmi_handle {
struct scmi_power_ops *power_ops;
struct scmi_sensor_ops *sensor_ops;
struct scmi_reset_ops *reset_ops;
+ struct scmi_notify_ops *notify_ops;
/* for protocol internal use */
void *perf_priv;
void *clk_priv;
void *power_priv;
void *sensor_priv;
void *reset_priv;
+ void *notify_priv;
};
enum scmi_std_protocol {
@@ -324,4 +370,58 @@ typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
void scmi_protocol_unregister(int protocol_id);
+/* SCMI Notification API - Custom Event Reports */
+enum scmi_notification_events {
+ SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
+ SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
+ SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
+ SCMI_EVENT_RESET_ISSUED = 0x0,
+ SCMI_EVENT_BASE_ERROR_EVENT = 0x0,
+};
+
+struct scmi_power_state_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_state;
+};
+
+struct scmi_perf_limits_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int range_max;
+ unsigned int range_min;
+};
+
+struct scmi_perf_level_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int performance_level;
+};
+
+struct scmi_sensor_trip_point_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int sensor_id;
+ unsigned int trip_point_desc;
+};
+
+struct scmi_reset_issued_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int reset_state;
+};
+
+struct scmi_base_error_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ bool fatal;
+ unsigned int cmd_count;
+ unsigned long long reports[];
+};
+
#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index a74c1d5acdf3..2249ecaf77e4 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -121,6 +121,15 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
/**
+ * cmdq_pkt_set_event() - append set event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event to be set
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event);
+
+/**
* cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to
* execute an instruction that wait for a specified
* hardware register to check for the value w/o mask.
@@ -152,6 +161,28 @@ int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
*/
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE
+ * to execute an instruction that set a constant value into
+ * internal register and use as value, mask or address in
+ * read/write instruction.
+ * @pkt: the CMDQ packet
+ * @reg_idx: the CMDQ internal register ID
+ * @value: the specified value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
+
+/**
+ * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
+ * @pkt: the CMDQ packet
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
+
/**
* cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
* packet and call back at the end of done packet
diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h
index 26f73df0a524..7ac115432fa1 100644
--- a/include/linux/soc/ti/k3-ringacc.h
+++ b/include/linux/soc/ti/k3-ringacc.h
@@ -107,6 +107,10 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
int id, u32 flags);
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring);
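A short usage sketch for the new pair-request helper; passing -1 as a ring ID to ask for any free ring follows the single-ring request convention and should be treated as an assumption, not a documented guarantee.

static int request_pair_sketch(struct k3_ringacc *ringacc,
			       struct k3_ring **fwd, struct k3_ring **compl)
{
	return k3_ringacc_request_rings_pair(ringacc, -1, -1, fwd, compl);
}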
/**
* k3_ringacc_ring_reset - ring reset
* @ring: pointer on Ring
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
index 11fb5048f5f6..e3aa8b14612e 100644
--- a/include/linux/soc/ti/ti_sci_inta_msi.h
+++ b/include/linux/soc/ti/ti_sci_inta_msi.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 TI SCI INTA MSI helper
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 9531ec823298..49c5d29cd33c 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -226,8 +226,8 @@ struct ti_sci_rm_core_ops {
* and destination
* @set_event_map: Set an Event based peripheral irq to Interrupt
* Aggregator.
- * @free_irq: Free an an IRQ route between the requested source
- * destination.
+ * @free_irq: Free an IRQ route between the requested source
+ * and destination.
* @free_event_map: Free an event based peripheral irq to Interrupt
* Aggregator.
*/
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
index f9ec353d24a5..bdbee1a97d36 100644
--- a/include/soc/qcom/rpmh.h
+++ b/include/soc/qcom/rpmh.h
@@ -20,7 +20,7 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state,
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n);
-int rpmh_invalidate(const struct device *dev);
+void rpmh_invalidate(const struct device *dev);
#else
@@ -38,8 +38,9 @@ static inline int rpmh_write_batch(const struct device *dev,
const struct tcs_cmd *cmd, u32 *n)
{ return -ENODEV; }
-static inline int rpmh_invalidate(const struct device *dev)
-{ return -ENODEV; }
+static inline void rpmh_invalidate(const struct device *dev)
+{
+}
#endif /* CONFIG_QCOM_RPMH */
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index 8f8e73e5cd45..bff99f23860c 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -3,28 +3,38 @@
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*/
-#ifndef _ABI_BPMP_ABI_H_
-#define _ABI_BPMP_ABI_H_
+#ifndef ABI_BPMP_ABI_H
+#define ABI_BPMP_ABI_H
-#ifdef LK
+#if defined(LK) || defined(BPMP_ABI_HAVE_STDC)
+#include <stddef.h>
#include <stdint.h>
#endif
-#ifndef __ABI_PACKED
-#define __ABI_PACKED __attribute__((packed))
+#ifndef BPMP_ABI_PACKED
+#ifdef __ABI_PACKED
+#define BPMP_ABI_PACKED __ABI_PACKED
+#else
+#define BPMP_ABI_PACKED __attribute__((packed))
+#endif
#endif
#ifdef NO_GCC_EXTENSIONS
-#define EMPTY char empty;
-#define EMPTY_ARRAY 1
+#define BPMP_ABI_EMPTY char empty;
+#define BPMP_ABI_EMPTY_ARRAY 1
#else
-#define EMPTY
-#define EMPTY_ARRAY 0
+#define BPMP_ABI_EMPTY
+#define BPMP_ABI_EMPTY_ARRAY 0
#endif
-#ifndef __UNION_ANON
-#define __UNION_ANON
+#ifndef BPMP_UNION_ANON
+#ifdef __UNION_ANON
+#define BPMP_UNION_ANON __UNION_ANON
+#else
+#define BPMP_UNION_ANON
+#endif
#endif
+
/**
* @file
*/
@@ -73,6 +83,7 @@
struct mrq_request {
/** @brief MRQ number of the request */
uint32_t mrq;
+
/**
* @brief Flags providing follow up directions to the receiver
*
@@ -82,7 +93,7 @@ struct mrq_request {
* | 0 | should be 1 |
*/
uint32_t flags;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Format
@@ -98,18 +109,18 @@ struct mrq_response {
int32_t err;
/** @brief Reserved for future use */
uint32_t flags;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Format
* Minimum needed size for an IPC message buffer
*/
-#define MSG_MIN_SZ 128
+#define MSG_MIN_SZ 128U
/**
* @ingroup MRQ_Format
* Minimum size guaranteed for data in an IPC message buffer
*/
-#define MSG_DATA_MIN_SZ 120
+#define MSG_DATA_MIN_SZ 120U
/**
* @ingroup MRQ_Codes
@@ -118,36 +129,36 @@ struct mrq_response {
* @{
*/
-#define MRQ_PING 0
-#define MRQ_QUERY_TAG 1
-#define MRQ_MODULE_LOAD 4
-#define MRQ_MODULE_UNLOAD 5
-#define MRQ_TRACE_MODIFY 7
-#define MRQ_WRITE_TRACE 8
-#define MRQ_THREADED_PING 9
-#define MRQ_MODULE_MAIL 11
-#define MRQ_DEBUGFS 19
-#define MRQ_RESET 20
-#define MRQ_I2C 21
-#define MRQ_CLK 22
-#define MRQ_QUERY_ABI 23
-#define MRQ_PG_READ_STATE 25
-#define MRQ_PG_UPDATE_STATE 26
-#define MRQ_THERMAL 27
-#define MRQ_CPU_VHINT 28
-#define MRQ_ABI_RATCHET 29
-#define MRQ_EMC_DVFS_LATENCY 31
-#define MRQ_TRACE_ITER 64
-#define MRQ_RINGBUF_CONSOLE 65
-#define MRQ_PG 66
-#define MRQ_CPU_NDIV_LIMITS 67
-#define MRQ_STRAP 68
-#define MRQ_UPHY 69
-#define MRQ_CPU_AUTO_CC3 70
-#define MRQ_QUERY_FW_TAG 71
-#define MRQ_FMON 72
-#define MRQ_EC 73
-#define MRQ_FBVOLT_STATUS 74
+#define MRQ_PING 0U
+#define MRQ_QUERY_TAG 1U
+#define MRQ_MODULE_LOAD 4U
+#define MRQ_MODULE_UNLOAD 5U
+#define MRQ_TRACE_MODIFY 7U
+#define MRQ_WRITE_TRACE 8U
+#define MRQ_THREADED_PING 9U
+#define MRQ_MODULE_MAIL 11U
+#define MRQ_DEBUGFS 19U
+#define MRQ_RESET 20U
+#define MRQ_I2C 21U
+#define MRQ_CLK 22U
+#define MRQ_QUERY_ABI 23U
+#define MRQ_PG_READ_STATE 25U
+#define MRQ_PG_UPDATE_STATE 26U
+#define MRQ_THERMAL 27U
+#define MRQ_CPU_VHINT 28U
+#define MRQ_ABI_RATCHET 29U
+#define MRQ_EMC_DVFS_LATENCY 31U
+#define MRQ_TRACE_ITER 64U
+#define MRQ_RINGBUF_CONSOLE 65U
+#define MRQ_PG 66U
+#define MRQ_CPU_NDIV_LIMITS 67U
+#define MRQ_STRAP 68U
+#define MRQ_UPHY 69U
+#define MRQ_CPU_AUTO_CC3 70U
+#define MRQ_QUERY_FW_TAG 71U
+#define MRQ_FMON 72U
+#define MRQ_EC 73U
+#define MRQ_DEBUG 75U
/** @} */
@@ -156,7 +167,7 @@ struct mrq_response {
* @brief Maximum MRQ code to be sent by CPU software to
* BPMP. Subject to change in future
*/
-#define MAX_CPU_MRQ_ID 74
+#define MAX_CPU_MRQ_ID 75U
/**
* @addtogroup MRQ_Payloads
@@ -223,7 +234,7 @@ struct mrq_response {
struct mrq_ping_request {
/** @brief Arbitrarily chosen value */
uint32_t challenge;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Ping
@@ -237,7 +248,7 @@ struct mrq_ping_request {
struct mrq_ping_response {
/** @brief Response to the MRQ_PING challenge */
uint32_t reply;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
@@ -264,7 +275,7 @@ struct mrq_ping_response {
struct mrq_query_tag_request {
/** @brief Base address to store the firmware tag */
uint32_t addr;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
@@ -291,15 +302,15 @@ struct mrq_query_tag_request {
struct mrq_query_fw_tag_response {
/** @brief Array to store tag information */
uint8_t tag[32];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_LOAD
* @brief Dynamically load a BPMP code module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_load_request
@@ -327,11 +338,11 @@ struct mrq_query_fw_tag_response {
*
*/
struct mrq_module_load_request {
- /** @brief Base address of the code to load. Treated as (void *) */
- uint32_t phys_addr; /* (void *) */
+ /** @brief Base address of the code to load */
+ uint32_t phys_addr;
/** @brief Size in bytes of code to load */
uint32_t size;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Module
@@ -342,7 +353,7 @@ struct mrq_module_load_request {
struct mrq_module_load_response {
/** @brief Handle to the loaded module */
uint32_t base;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -350,8 +361,8 @@ struct mrq_module_load_response {
* @def MRQ_MODULE_UNLOAD
* @brief Unload a previously loaded code module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_unload_request
@@ -370,7 +381,7 @@ struct mrq_module_load_response {
struct mrq_module_unload_request {
/** @brief Handle of the module to unload */
uint32_t base;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -378,6 +389,8 @@ struct mrq_module_unload_request {
* @def MRQ_TRACE_MODIFY
* @brief Modify the set of enabled trace events
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -400,7 +413,7 @@ struct mrq_trace_modify_request {
uint32_t clr;
/** @brief Bit mask of trace events to enable */
uint32_t set;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Trace
@@ -414,13 +427,15 @@ struct mrq_trace_modify_request {
struct mrq_trace_modify_response {
/** @brief Bit mask of trace event enable states */
uint32_t mask;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_WRITE_TRACE
* @brief Write trace data to a buffer
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -454,7 +469,7 @@ struct mrq_write_trace_request {
uint32_t area;
/** @brief Size in bytes of the output buffer */
uint32_t size;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Trace
@@ -471,25 +486,25 @@ struct mrq_write_trace_response {
* drained to the output buffer. Value is 0 otherwise.
*/
uint32_t eof;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct mrq_threaded_ping_request {
uint32_t challenge;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct mrq_threaded_ping_response {
uint32_t reply;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_MAIL
* @brief Send a message to a loadable module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_module_mail_request
@@ -510,8 +525,8 @@ struct mrq_module_mail_request {
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
*/
- uint8_t data[EMPTY_ARRAY];
-} __ABI_PACKED;
+ uint8_t data[BPMP_ABI_EMPTY_ARRAY];
+} BPMP_ABI_PACKED;
/**
* @ingroup Module
@@ -523,8 +538,8 @@ struct mrq_module_mail_response {
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
*/
- uint8_t data[EMPTY_ARRAY];
-} __ABI_PACKED;
+ uint8_t data[BPMP_ABI_EMPTY_ARRAY];
+} BPMP_ABI_PACKED;
/** @endcond */
/**
@@ -532,6 +547,8 @@ struct mrq_module_mail_response {
* @def MRQ_DEBUGFS
* @brief Interact with BPMP's debugfs file nodes
*
+ * @deprecated use MRQ_DEBUG instead.
+ *
* * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
@@ -587,7 +604,7 @@ struct cmd_debugfs_fileop_request {
uint32_t dataaddr;
/** @brief Length in bytes of data buffer */
uint32_t datalen;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -598,7 +615,7 @@ struct cmd_debugfs_dumpdir_request {
uint32_t dataaddr;
/** @brief Length in bytes of data buffer */
uint32_t datalen;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -609,7 +626,7 @@ struct cmd_debugfs_fileop_response {
uint32_t reserved;
/** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -620,7 +637,7 @@ struct cmd_debugfs_dumpdir_response {
uint32_t reserved;
/** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -643,8 +660,8 @@ struct mrq_debugfs_request {
union {
struct cmd_debugfs_fileop_request fop;
struct cmd_debugfs_dumpdir_request dumpdir;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -659,8 +676,8 @@ struct mrq_debugfs_response {
struct cmd_debugfs_fileop_response fop;
/** @brief Response data for CMD_DEBUGFS_DUMPDIR command */
struct cmd_debugfs_dumpdir_response dumpdir;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @addtogroup Debugfs
@@ -673,6 +690,190 @@ struct mrq_debugfs_response {
/**
* @ingroup MRQ_Codes
+ * @def MRQ_DEBUG
+ * @brief Interact with BPMP's debugfs file nodes, using the message
+ * payload for exchanging data. This is functionally equivalent to
+ * @ref MRQ_DEBUGFS, but the data is exchanged differently:
+ * when software running on the CPU reads a debugfs file,
+ * the file path and read data are carried in the message payload.
+ * Since the message payload size is limited, a debugfs file
+ * transaction might require multiple frames of data exchanged
+ * between BPMP and CPU until the transaction completes.
+ *
+ * * Platforms: T194
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_debug_request
+ * * Response Payload: @ref mrq_debug_response
+ */
+
+/** @ingroup Debugfs */
+enum mrq_debug_commands {
+ /** @brief Open required file for read operation */
+ CMD_DEBUG_OPEN_RO = 0,
+ /** @brief Open required file for write operation */
+ CMD_DEBUG_OPEN_WO = 1,
+ /** @brief Perform read */
+ CMD_DEBUG_READ = 2,
+ /** @brief Perform write */
+ CMD_DEBUG_WRITE = 3,
+ /** @brief Close file */
+ CMD_DEBUG_CLOSE = 4,
+ /** @brief Not a command */
+ CMD_DEBUG_MAX
+};
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum number of files that can be open at a given time
+ */
+#define DEBUG_MAX_OPEN_FILES 1
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of null-terminated file name string in bytes.
+ * Value is derived from memory available in message payload while
+ * using @ref cmd_debug_fopen_request
+ * Value 4 corresponds to size of @ref mrq_debug_commands
+ * in @ref mrq_debug_request.
+ * 120 - 4 (32-bit dbg_cmd) = 116
+ */
+#define DEBUG_FNAME_MAX_SZ (MSG_DATA_MIN_SZ - 4)
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_OPEN command
+ */
+struct cmd_debug_fopen_request {
+ /** @brief File name - Null-terminated string with maximum
+ * length @ref DEBUG_FNAME_MAX_SZ
+ */
+ char name[DEBUG_FNAME_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Response data for CMD_DEBUG_OPEN_RO/WO command
+ */
+struct cmd_debug_fopen_response {
+ /** @brief Identifier for file access */
+ uint32_t fd;
+ /** @brief Data length. File data size for READ command.
+ * Maximum allowed length for WRITE command
+ */
+ uint32_t datalen;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_READ command
+ */
+struct cmd_debug_fread_request {
+ /** @brief File access identifier received in response
+ * to CMD_DEBUG_OPEN_RO request
+ */
+ uint32_t fd;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of read data in bytes.
+ * Value is derived from memory available in message payload while
+ * using @ref cmd_debug_fread_response.
+ */
+#define DEBUG_READ_MAX_SZ (MSG_DATA_MIN_SZ - 4)
+
+/**
+ * @ingroup Debugfs
+ * @brief Response data for CMD_DEBUG_READ command
+ */
+struct cmd_debug_fread_response {
+ /** @brief Size of data provided in this response in bytes */
+ uint32_t readlen;
+ /** @brief File data from seek position */
+ char data[DEBUG_READ_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of write data in bytes.
+ * Value is derived from memory available in message payload while
+ * using @ref cmd_debug_fwrite_request.
+ */
+#define DEBUG_WRITE_MAX_SZ (MSG_DATA_MIN_SZ - 12)
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_WRITE command
+ */
+struct cmd_debug_fwrite_request {
+ /** @brief File access identifier received in response
+ * to CMD_DEBUG_OPEN_WO request
+ */
+ uint32_t fd;
+ /** @brief Size of write data in bytes */
+ uint32_t datalen;
+ /** @brief Data to be written */
+ char data[DEBUG_WRITE_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_CLOSE command
+ */
+struct cmd_debug_fclose_request {
+ /** @brief File access identifier received in response
+ * to a CMD_DEBUG_OPEN_RO/WO request
+ */
+ uint32_t fd;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Request with #MRQ_DEBUG.
+ *
+ * The sender of an MRQ_DEBUG message uses #cmd to specify a debugfs
+ * command to execute. Legal commands are the values of @ref
+ * mrq_debug_commands. Each command requires a specific additional
+ * payload of data.
+ *
+ * |command |payload|
+ * |-------------------|-------|
+ * |CMD_DEBUG_OPEN_RO |fop |
+ * |CMD_DEBUG_OPEN_WO |fop |
+ * |CMD_DEBUG_READ |frd |
+ * |CMD_DEBUG_WRITE |fwr |
+ * |CMD_DEBUG_CLOSE |fcl |
+ */
+struct mrq_debug_request {
+ /** @brief Sub-command (@ref mrq_debug_commands) */
+ uint32_t cmd;
+ union {
+ /** @brief Request payload for CMD_DEBUG_OPEN_RO/WO command */
+ struct cmd_debug_fopen_request fop;
+ /** @brief Request payload for CMD_DEBUG_READ command */
+ struct cmd_debug_fread_request frd;
+ /** @brief Request payload for CMD_DEBUG_WRITE command */
+ struct cmd_debug_fwrite_request fwr;
+ /** @brief Request payload for CMD_DEBUG_CLOSE command */
+ struct cmd_debug_fclose_request fcl;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ */
+struct mrq_debug_response {
+ union {
+ /** @brief Response data for CMD_DEBUG_OPEN_RO/WO command */
+ struct cmd_debug_fopen_response fop;
+ /** @brief Response data for CMD_DEBUG_READ command */
+ struct cmd_debug_fread_response frd;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
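A sketch of a CMD_DEBUG_OPEN_RO exchange; the file path is purely illustrative, and the transport call mirrors tegra_bpmp_transfer() as used by the updated bpmp-debugfs code.

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int debug_open_ro_sketch(struct tegra_bpmp *bpmp, uint32_t *fd)
{
	struct mrq_debug_request req = { .cmd = CMD_DEBUG_OPEN_RO };
	struct mrq_debug_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_DEBUG,
		.tx = { .data = &req, .size = sizeof(req) },
		.rx = { .data = &resp, .size = sizeof(resp) },
	};
	int err;

	strscpy(req.fop.name, "clk/clk_tree", sizeof(req.fop.name));

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err)
		return err;

	*fd = resp.fop.fd;	/* use with CMD_DEBUG_READ, then CMD_DEBUG_CLOSE */
	return 0;
}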
+/**
+ * @ingroup MRQ_Codes
* @def MRQ_RESET
* @brief Reset an IP block
*
@@ -687,14 +888,41 @@ struct mrq_debugfs_response {
*/
enum mrq_reset_commands {
- /** @brief Assert module reset */
+ /**
+ * @brief Assert module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_ASSERT = 1,
- /** @brief Deassert module reset */
+ /**
+ * @brief Deassert module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_DEASSERT = 2,
- /** @brief Assert and deassert the module reset */
+ /**
+ * @brief Assert and deassert the module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_MODULE = 3,
- /** @brief Get the highest reset ID */
+ /**
+ * @brief Get the highest reset ID
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_ENODEV if no reset domains are supported (number of IDs is 0)
+ */
CMD_RESET_GET_MAX_ID = 4,
+
/** @brief Not part of ABI and subject to change */
CMD_RESET_MAX,
};
@@ -710,7 +938,7 @@ struct mrq_reset_request {
uint32_t cmd;
/** @brief ID of the reset to be affected */
uint32_t reset_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
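A short sketch of pulse-resetting a module via MRQ_RESET; reset_id is an SoC-specific value, and the transport plumbing is the same as in the MRQ_DEBUG sketch above.

struct mrq_reset_request req = {
	.cmd = CMD_RESET_MODULE,
	.reset_id = reset_id,	/* SoC-specific reset identifier */
};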
/**
* @brief Response for MRQ_RESET sub-command CMD_RESET_GET_MAX_ID. When
@@ -720,7 +948,7 @@ struct mrq_reset_request {
struct cmd_reset_get_max_id_response {
/** @brief Max reset id */
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response with MRQ_RESET
@@ -739,8 +967,8 @@ struct cmd_reset_get_max_id_response {
struct mrq_reset_response {
union {
struct cmd_reset_get_max_id_response reset_get_max_id;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -758,17 +986,17 @@ struct mrq_reset_response {
* @addtogroup I2C
* @{
*/
-#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12)
-#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4)
+#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12U)
+#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4U)
-#define SERIALI2C_TEN 0x0010
-#define SERIALI2C_RD 0x0001
-#define SERIALI2C_STOP 0x8000
-#define SERIALI2C_NOSTART 0x4000
-#define SERIALI2C_REV_DIR_ADDR 0x2000
-#define SERIALI2C_IGNORE_NAK 0x1000
-#define SERIALI2C_NO_RD_ACK 0x0800
-#define SERIALI2C_RECV_LEN 0x0400
+#define SERIALI2C_TEN 0x0010U
+#define SERIALI2C_RD 0x0001U
+#define SERIALI2C_STOP 0x8000U
+#define SERIALI2C_NOSTART 0x4000U
+#define SERIALI2C_REV_DIR_ADDR 0x2000U
+#define SERIALI2C_IGNORE_NAK 0x1000U
+#define SERIALI2C_NO_RD_ACK 0x0800U
+#define SERIALI2C_RECV_LEN 0x0400U
enum {
CMD_I2C_XFER = 1
@@ -798,7 +1026,7 @@ struct serial_i2c_request {
uint16_t len;
/** @brief For write transactions only, #len bytes of data */
uint8_t data[];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Trigger one or more i2c transactions
@@ -812,7 +1040,7 @@ struct cmd_i2c_xfer_request {
/** @brief Serialized packed instances of @ref serial_i2c_request*/
uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Container for data read from the i2c bus
@@ -826,7 +1054,7 @@ struct cmd_i2c_xfer_response {
uint32_t data_size;
/** @brief I2c read data */
uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Request with #MRQ_I2C
@@ -836,14 +1064,25 @@ struct mrq_i2c_request {
uint32_t cmd;
/** @brief Parameters of the transfer request */
struct cmd_i2c_xfer_request xfer;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_I2C
+ *
+ * mrq_response::err is
+ * 0: Success
+ * -#BPMP_EBADCMD: if mrq_i2c_request::cmd is other than 1
+ * -#BPMP_EINVAL: if cmd_i2c_xfer_request does not contain a correctly formatted request
+ * -#BPMP_ENODEV: if cmd_i2c_xfer_request::bus_id is not supported by BPMP
+ * -#BPMP_EACCES: if the i2c transaction is not allowed due to firewall rules
+ * -#BPMP_ETIMEDOUT: if the i2c transaction times out
+ * -#BPMP_ENXIO: if the i2c slave device does not reply with ACK to the transaction
+ * -#BPMP_EAGAIN: if an ARB_LOST condition is detected by the i2c controller
+ * -#BPMP_EIO: any i2c controller error other than NO_ACK or ARB_LOST
*/
struct mrq_i2c_response {
struct cmd_i2c_xfer_response xfer;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
@@ -876,90 +1115,105 @@ enum {
CMD_CLK_MAX,
};
-#define BPMP_CLK_HAS_MUX (1 << 0)
-#define BPMP_CLK_HAS_SET_RATE (1 << 1)
-#define BPMP_CLK_IS_ROOT (1 << 2)
+#define BPMP_CLK_HAS_MUX (1U << 0U)
+#define BPMP_CLK_HAS_SET_RATE (1U << 1U)
+#define BPMP_CLK_IS_ROOT (1U << 2U)
+#define BPMP_CLK_IS_VAR_ROOT (1U << 3U)
-#define MRQ_CLK_NAME_MAXLEN 40
-#define MRQ_CLK_MAX_PARENTS 16
+#define MRQ_CLK_NAME_MAXLEN 40U
+#define MRQ_CLK_MAX_PARENTS 16U
/** @private */
struct cmd_clk_get_rate_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_rate_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_round_rate_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_round_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_parent_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_parent_response {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_parent_request {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_parent_response {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_is_enabled_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+/**
+ * @brief Response data to #MRQ_CLK sub-command CMD_CLK_IS_ENABLED
+ */
struct cmd_clk_is_enabled_response {
+ /**
+ * @brief The state of the clock that has been successfully
+ * requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the
+ * master invoking the command earlier.
+ *
+ * The state may not reflect the physical state of the clock
+ * if there are some other masters requesting it to be
+ * enabled.
+ *
+ * Value 0 is disabled, all other values indicate enabled.
+ */
int32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_enable_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_enable_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_disable_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_disable_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_all_info_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_all_info_response {
uint32_t flags;
@@ -967,25 +1221,25 @@ struct cmd_clk_get_all_info_response {
uint32_t parents[MRQ_CLK_MAX_PARENTS];
uint8_t num_parents;
uint8_t name[MRQ_CLK_NAME_MAXLEN];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_max_clk_id_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_max_clk_id_response {
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_fmax_at_vmin_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_fmax_at_vmin_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Clocks
@@ -1040,8 +1294,8 @@ struct mrq_clk_request {
struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id;
/** @private */
struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup Clocks
@@ -1082,8 +1336,8 @@ struct mrq_clk_response {
struct cmd_clk_get_all_info_response clk_get_all_info;
struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1109,7 +1363,7 @@ struct mrq_clk_response {
struct mrq_query_abi_request {
/** @brief MRQ code to query */
uint32_t mrq;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup ABI_info
@@ -1121,7 +1375,7 @@ struct mrq_query_abi_request {
struct mrq_query_abi_response {
/** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
int32_t status;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
@@ -1146,7 +1400,7 @@ struct mrq_query_abi_response {
struct mrq_pg_read_state_request {
/** @brief ID of partition */
uint32_t partition_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Powergating
@@ -1161,7 +1415,7 @@ struct mrq_pg_read_state_response {
* * 1 : on
*/
uint32_t logic_state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/** @} */
@@ -1211,7 +1465,7 @@ struct mrq_pg_update_state_request {
* @ref logic_state == 0x3)
*/
uint32_t clock_state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -1307,25 +1561,38 @@ enum pg_states {
struct cmd_pg_query_abi_request {
/** @ref mrq_pg_cmd */
uint32_t type;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_set_state_request {
/** @ref pg_states */
uint32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+/**
+ * @brief Response data to #MRQ_PG sub command #CMD_PG_GET_STATE
+ */
struct cmd_pg_get_state_response {
- /** @ref pg_states */
+ /**
+ * @brief The state of the power partition that has been
+ * successfully requested by the master earlier using #MRQ_PG
+ * command #CMD_PG_SET_STATE.
+ *
+ * The state may not reflect the physical state of the power
+ * partition if there are some other masters requesting it to
+ * be enabled.
+ *
+ * See @ref pg_states for possible values
+ */
uint32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_get_name_response {
uint8_t name[MRQ_PG_NAME_MAXLEN];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_get_max_id_response {
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Request with #MRQ_PG
@@ -1350,8 +1617,8 @@ struct mrq_pg_request {
union {
struct cmd_pg_query_abi_request query_abi;
struct cmd_pg_set_state_request set_state;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @brief Response to MRQ_PG
@@ -1373,8 +1640,8 @@ struct mrq_pg_response {
struct cmd_pg_get_state_response get_state;
struct cmd_pg_get_name_response get_name;
struct cmd_pg_get_max_id_response get_max_id;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1463,6 +1730,20 @@ enum mrq_thermal_host_to_bpmp_cmd {
*/
CMD_THERMAL_GET_NUM_ZONES = 3,
+ /**
+ * @brief Get the thermtrip of the specified zone.
+ *
+ * Host needs to supply request parameters.
+ *
+ * mrq_response::err is
+ * * 0: Valid zone information returned.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_ENOENT: No driver registered for thermal zone.
+ * * -#BPMP_ERANGE: Thermtrip is invalid or disabled.
+ * * -#BPMP_EFAULT: Problem reading zone information.
+ */
+ CMD_THERMAL_GET_THERMTRIP = 4,
+
/** @brief: number of supported host-to-bpmp commands. May
* increase in future
*/
@@ -1493,7 +1774,7 @@ enum mrq_thermal_bpmp_to_host_cmd {
*/
struct cmd_thermal_query_abi_request {
uint32_t type;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data for request type CMD_THERMAL_GET_TEMP
@@ -1502,7 +1783,7 @@ struct cmd_thermal_query_abi_request {
*/
struct cmd_thermal_get_temp_request {
uint32_t zone;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host reply data for request CMD_THERMAL_GET_TEMP
@@ -1515,7 +1796,7 @@ struct cmd_thermal_get_temp_request {
*/
struct cmd_thermal_get_temp_response {
int32_t temp;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data for request type CMD_THERMAL_SET_TRIP
@@ -1530,7 +1811,7 @@ struct cmd_thermal_set_trip_request {
int32_t low;
int32_t high;
uint32_t enabled;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED
@@ -1539,7 +1820,7 @@ struct cmd_thermal_set_trip_request {
*/
struct cmd_thermal_host_trip_reached_request {
uint32_t zone;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES
@@ -1549,7 +1830,25 @@ struct cmd_thermal_host_trip_reached_request {
*/
struct cmd_thermal_get_num_zones_response {
uint32_t num;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_GET_THERMTRIP
+ *
+ * zone: Thermal zone number.
+ */
+struct cmd_thermal_get_thermtrip_request {
+ uint32_t zone;
+} BPMP_ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request CMD_THERMAL_GET_THERMTRIP
+ *
+ * thermtrip: HW shutdown temperature in millicelsius.
+ */
+struct cmd_thermal_get_thermtrip_response {
+ int32_t thermtrip;
+} BPMP_ABI_PACKED;
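A small sketch of querying a zone's thermtrip with the new sub-command; the zone index is illustrative, and the request travels over the usual MRQ_THERMAL transport.

struct mrq_thermal_host_to_bpmp_request req = {
	.type = CMD_THERMAL_GET_THERMTRIP,
	.get_thermtrip = { .zone = 0 },	/* example zone index */
};
/* on success, the reply's get_thermtrip.thermtrip holds millicelsius */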
/*
* Host->BPMP request data.
@@ -1565,8 +1864,9 @@ struct mrq_thermal_host_to_bpmp_request {
struct cmd_thermal_query_abi_request query_abi;
struct cmd_thermal_get_temp_request get_temp;
struct cmd_thermal_set_trip_request set_trip;
- } __UNION_ANON;
-} __ABI_PACKED;
+ struct cmd_thermal_get_thermtrip_request get_thermtrip;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host request data.
@@ -1578,16 +1878,17 @@ struct mrq_thermal_bpmp_to_host_request {
uint32_t type;
union {
struct cmd_thermal_host_trip_reached_request host_trip_reached;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/*
* Data in reply to a Host->BPMP request.
*/
union mrq_thermal_bpmp_to_host_response {
struct cmd_thermal_get_temp_response get_temp;
+ struct cmd_thermal_get_thermtrip_response get_thermtrip;
struct cmd_thermal_get_num_zones_response get_num_zones;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/**
@@ -1619,7 +1920,7 @@ struct mrq_cpu_vhint_request {
uint32_t addr;
/** @brief ID of the cluster whose data is requested */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Description of the CPU v/f relation
@@ -1646,7 +1947,7 @@ struct cpu_vhint_data {
uint16_t vindex_div;
/** reserved for future use */
uint16_t reserved[328];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond */
/** @} */
@@ -1735,11 +2036,11 @@ struct mrq_abi_ratchet_response {
* @brief Used by @ref mrq_emc_dvfs_latency_response
*/
struct emc_dvfs_latency {
- /** @brief EMC frequency in kHz */
+ /** @brief EMC DVFS node frequency in kHz */
uint32_t freq;
/** @brief EMC DVFS latency in nanoseconds */
uint32_t latency;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
#define EMC_DVFS_LATENCY_MAX_SIZE 14
/**
@@ -1748,9 +2049,9 @@ struct emc_dvfs_latency {
struct mrq_emc_dvfs_latency_response {
/** @brief The number of valid entries in #pairs */
uint32_t num_pairs;
- /** @brief EMC <frequency, latency> information */
+ /** @brief EMC DVFS node <frequency, latency> information */
struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1775,7 +2076,7 @@ struct mrq_emc_dvfs_latency_response {
struct mrq_cpu_ndiv_limits_request {
/** @brief Enum cluster_id */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_CPU_NDIV_LIMITS
@@ -1791,7 +2092,7 @@ struct mrq_cpu_ndiv_limits_response {
uint16_t ndiv_max;
/** @brief Minimum allowed NDIV value */
uint16_t ndiv_min;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -1823,7 +2124,7 @@ struct mrq_cpu_ndiv_limits_response {
struct mrq_cpu_auto_cc3_request {
/** @brief Enum cluster_id (logical cluster id, known to CCPLEX s/w) */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_CPU_AUTO_CC3
@@ -1837,7 +2138,7 @@ struct mrq_cpu_auto_cc3_response {
* - bit [0] if "1" auto-CC3 is allowed, if "0" auto-CC3 is not allowed
*/
uint32_t auto_cc3_config;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -1847,6 +2148,8 @@ struct mrq_cpu_auto_cc3_response {
* @def MRQ_TRACE_ITER
* @brief Manage the trace iterator
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -1868,7 +2171,7 @@ enum {
struct mrq_trace_iter_request {
/** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
uint32_t cmd;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1944,12 +2247,12 @@ enum mrq_ringbuf_console_host_to_bpmp_cmd {
struct cmd_ringbuf_console_query_abi_req {
/** @brief Command identifier to be queried */
uint32_t cmd;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_ringbuf_console_query_abi_resp {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1960,7 +2263,7 @@ struct cmd_ringbuf_console_read_req {
* @brief Number of bytes requested to be read from the BPMP TX buffer
*/
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1971,7 +2274,7 @@ struct cmd_ringbuf_console_read_resp {
uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_READ_LEN];
/** @brief Number of bytes in cmd_ringbuf_console_read_resp::data */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1982,7 +2285,7 @@ struct cmd_ringbuf_console_write_req {
uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_WRITE_LEN];
/** @brief Number of bytes in cmd_ringbuf_console_write_req::data */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1993,12 +2296,12 @@ struct cmd_ringbuf_console_write_resp {
uint32_t space_avail;
/** @brief Number of bytes that were written to the BPMP RX buffer */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_ringbuf_console_get_fifo_req {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2013,7 +2316,7 @@ struct cmd_ringbuf_console_get_fifo_resp {
uint64_t bpmp_tx_tail_addr;
/** @brief Length of the BPMP TX buffer */
uint32_t bpmp_tx_buf_len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2033,8 +2336,8 @@ struct mrq_ringbuf_console_host_to_bpmp_request {
struct cmd_ringbuf_console_read_req read;
struct cmd_ringbuf_console_write_req write;
struct cmd_ringbuf_console_get_fifo_req get_fifo;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2047,7 +2350,7 @@ union mrq_ringbuf_console_bpmp_to_host_response {
struct cmd_ringbuf_console_read_resp read;
struct cmd_ringbuf_console_write_resp write;
struct cmd_ringbuf_console_get_fifo_resp get_fifo;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/**
@@ -2091,7 +2394,7 @@ struct mrq_strap_request {
uint32_t id;
/** @brief Desired value for strap (if cmd is #STRAP_SET) */
uint32_t value;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @defgroup Strap_Ids Strap Identifiers
@@ -2134,28 +2437,28 @@ struct cmd_uphy_margin_control_request {
uint32_t y;
/** @brief Set number of bit blocks for each margin section */
uint32_t nblks;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_margin_status_response {
/** @brief Number of errors observed */
uint32_t status;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_init_request {
/** @brief EP controller number, valid: 0, 4, 5 */
uint8_t ep_controller;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_pcie_controller_state_request {
/** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */
uint8_t pcie_controller;
uint8_t enable;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_off_request {
/** @brief EP controller number, valid: 0, 4, 5 */
uint8_t ep_controller;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup UPHY
@@ -2186,8 +2489,8 @@ struct mrq_uphy_request {
struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
struct cmd_uphy_pcie_controller_state_request controller_state;
struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup UPHY
@@ -2207,8 +2510,8 @@ struct mrq_uphy_request {
struct mrq_uphy_response {
union {
struct cmd_uphy_margin_status_response uphy_get_margin_status;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -2258,31 +2561,31 @@ enum {
struct cmd_fmon_gear_clamp_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_clamp_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_free_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_free_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_get_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_fmon_gear_get_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup FMON
@@ -2315,8 +2618,8 @@ struct mrq_fmon_request {
struct cmd_fmon_gear_free_request fmon_gear_free;
/** @private */
struct cmd_fmon_gear_get_request fmon_gear_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup FMON
@@ -2340,8 +2643,8 @@ struct mrq_fmon_response {
/** @private */
struct cmd_fmon_gear_free_response fmon_gear_free;
struct cmd_fmon_gear_get_response fmon_gear_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -2366,13 +2669,27 @@ struct mrq_fmon_response {
*/
enum {
/**
+ * @cond DEPRECATED
* @brief Retrieve specified EC status.
*
* mrq_response::err is 0 if the operation was successful, or @n
* -#BPMP_ENODEV if target EC is not owned by BPMP @n
- * -#BPMP_EACCES if target EC power domain is turned off
+ * -#BPMP_EACCES if target EC power domain is turned off @n
+ * -#BPMP_EBADCMD if subcommand is not supported
+ * @endcond
*/
- CMD_EC_STATUS_GET = 1,
+ CMD_EC_STATUS_GET = 1, /* deprecated */
+
+ /**
+ * @brief Retrieve specified EC extended status (includes error
+ * counter and user values).
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_ENODEV if target EC is not owned by BPMP @n
+ * -#BPMP_EACCES if target EC power domain is turned off @n
+ * -#BPMP_EBADCMD if subcommand is not supported
+ */
+ CMD_EC_STATUS_EX_GET = 2,
CMD_EC_NUM,
};
@@ -2428,13 +2745,13 @@ enum bpmp_ec_err_type {
/** @brief SW Correctable error
*
- * Error descriptor @ref ec_err_simple_desc.
+ * Error descriptor @ref ec_err_sw_error_desc.
*/
EC_ERR_TYPE_SW_CORRECTABLE = 16,
/** @brief SW Uncorrectable error
*
- * Error descriptor @ref ec_err_simple_desc.
+ * Error descriptor @ref ec_err_sw_error_desc.
*/
EC_ERR_TYPE_SW_UNCORRECTABLE = 17,
@@ -2454,9 +2771,9 @@ enum bpmp_ec_err_type {
/** @brief Group of registers with parity error. */
enum ec_registers_group {
/** @brief Functional registers group */
- EC_ERR_GROUP_FUNC_REG = 0,
+ EC_ERR_GROUP_FUNC_REG = 0U,
/** @brief SCR registers group */
- EC_ERR_GROUP_SCR_REG = 1,
+ EC_ERR_GROUP_SCR_REG = 1U,
};
/**
@@ -2465,11 +2782,11 @@ enum ec_registers_group {
* @{
*/
/** @brief No EC error found flag */
-#define EC_STATUS_FLAG_NO_ERROR 0x0001
+#define EC_STATUS_FLAG_NO_ERROR 0x0001U
/** @brief Last EC error found flag */
-#define EC_STATUS_FLAG_LAST_ERROR 0x0002
+#define EC_STATUS_FLAG_LAST_ERROR 0x0002U
/** @brief EC latent error flag */
-#define EC_STATUS_FLAG_LATENT_ERROR 0x0004
+#define EC_STATUS_FLAG_LATENT_ERROR 0x0004U
/** @} */
/**
@@ -2478,9 +2795,9 @@ enum ec_registers_group {
* @{
*/
/** @brief EC descriptor error resolved flag */
-#define EC_DESC_FLAG_RESOLVED 0x0001
+#define EC_DESC_FLAG_RESOLVED 0x0001U
/** @brief EC descriptor failed to retrieve id flag */
-#define EC_DESC_FLAG_NO_ID 0x0002
+#define EC_DESC_FLAG_NO_ID 0x0002U
/** @} */
/**
@@ -2497,7 +2814,7 @@ struct ec_err_fmon_desc {
uint32_t fmon_faults;
/** @brief FMON faults access error */
int32_t fmon_access_error;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* |error type | vmon_adc_id values |
@@ -2513,7 +2830,7 @@ struct ec_err_vmon_desc {
uint32_t vmon_faults;
/** @brief VMON faults access error */
int32_t vmon_access_error;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* |error type | reg_id values |
@@ -2527,7 +2844,22 @@ struct ec_err_reg_parity_desc {
uint16_t reg_id;
/** @brief Register group @ref ec_registers_group */
uint16_t reg_group;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+
+/**
+ * |error type | err_source_id values |
+ * |--------------------------------- |--------------------------|
+ * |@ref EC_ERR_TYPE_SW_CORRECTABLE | @ref bpmp_ec_ce_swd_ids |
+ * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | @ref bpmp_ec_ue_swd_ids |
+ */
+struct ec_err_sw_error_desc {
+ /** @brief Bitmask of @ref bpmp_ec_desc_flags */
+ uint16_t desc_flags;
+ /** @brief Error source id */
+ uint16_t err_source_id;
+ /** @brief SW error data */
+ uint32_t sw_error_data;
+} BPMP_ABI_PACKED;
/**
* |error type | err_source_id values |
@@ -2537,34 +2869,36 @@ struct ec_err_reg_parity_desc {
* |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids |
* |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids|
* |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids |
- * |@ref EC_ERR_TYPE_SW_CORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
*/
struct ec_err_simple_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
uint16_t desc_flags;
/** @brief Error source id. Id space depends on error type. */
uint16_t err_source_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @brief Union of EC error descriptors */
union ec_err_desc {
struct ec_err_fmon_desc fmon_desc;
struct ec_err_vmon_desc vmon_desc;
struct ec_err_reg_parity_desc reg_parity_desc;
+ struct ec_err_sw_error_desc sw_error_desc;
struct ec_err_simple_desc simple_desc;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_ec_status_get_request {
/** @brief HSM error line number that identifies target EC. */
uint32_t ec_hsm_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** EC status maximum number of descriptors */
-#define EC_ERR_STATUS_DESC_MAX_NUM 4
+#define EC_ERR_STATUS_DESC_MAX_NUM 4U
+/**
+ * @cond DEPRECATED
+ */
struct cmd_ec_status_get_response {
/** @brief Target EC id (the same id received with request). */
uint32_t ec_hsm_id;
@@ -2582,7 +2916,33 @@ struct cmd_ec_status_get_response {
uint32_t error_desc_num;
/** @brief EC error descriptors */
union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+/** @endcond */
+
+struct cmd_ec_status_ex_get_response {
+ /** @brief Target EC id (the same id received with request). */
+ uint32_t ec_hsm_id;
+ /**
+ * @brief Bitmask of @ref bpmp_ec_status_flags
+ *
+ * If the NO_ERROR flag is set, the error_* fields should be ignored
+ */
+ uint32_t ec_status_flags;
+ /** @brief Found EC error index. */
+ uint32_t error_idx;
+ /** @brief Found EC error type @ref bpmp_ec_err_type. */
+ uint32_t error_type;
+ /** @brief Found EC mission error counter value */
+ uint32_t error_counter;
+ /** @brief Found EC mission error user value */
+ uint32_t error_uval;
+ /** @brief Reserved entry */
+ uint32_t reserved;
+ /** @brief Number of returned EC error descriptors */
+ uint32_t error_desc_num;
+ /** @brief EC error descriptors */
+ union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
+} BPMP_ABI_PACKED;
/**
* @ingroup EC
@@ -2591,9 +2951,15 @@ struct cmd_ec_status_get_response {
* Used by the sender of an #MRQ_EC message to access ECs owned
* by BPMP.
*
+ * @cond DEPRECATED
* |sub-command |payload |
* |----------------------------|-----------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
+ * @endcond
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |@ref CMD_EC_STATUS_EX_GET |ec_status_get |
*
*/
@@ -2603,8 +2969,8 @@ struct mrq_ec_request {
union {
struct cmd_ec_status_get_request ec_status_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup EC
@@ -2613,49 +2979,28 @@ struct mrq_ec_request {
* Each sub-command supported by @ref mrq_ec_request may return
* sub-command-specific data as indicated below.
*
+ * @cond DEPRECATED
* |sub-command |payload |
* |----------------------------|------------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
+ * @endcond
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------|
+ * |@ref CMD_EC_STATUS_EX_GET |ec_status_ex_get |
*
*/
struct mrq_ec_response {
union {
+ /**
+ * @cond DEPRECATED
+ */
struct cmd_ec_status_get_response ec_status_get;
- } __UNION_ANON;
-} __ABI_PACKED;
-
-/** @} */
-/** @endcond */
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_FBVOLT_STATUS
- * @brief Provides status information about voltage state for fuse burning
- *
- * * Platforms: T194 onwards
- * @cond bpmp_t194
- * * Initiators: CCPLEX
- * * Target: BPMP
- * * Request Payload: None
- * * Response Payload: @ref mrq_fbvolt_status_response
- * @{
- */
-
-/**
- * @ingroup Fbvolt_status
- * @brief Response to #MRQ_FBVOLT_STATUS
- *
- * Value of #ready reflects if core voltages are in a suitable state for burning
- * fuses. A value of 0x1 indicates that core voltages are ready for burning
- * fuses. A value of 0x0 indicates that core voltages are not ready.
- */
-struct mrq_fbvolt_status_response {
- /** @brief Bit [0:0] - ready status, bits [31:1] - reserved */
- uint32_t ready;
- /** @brief Reserved */
- uint32_t unused;
-} __ABI_PACKED;
+ /** @endcond */
+ struct cmd_ec_status_ex_get_response ec_status_ex_get;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
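To make the old/new split concrete, a hedged sketch (not from the patch) of fetching the extended status. Per the tables above, both sub-commands share the cmd_ec_status_get_request payload; the cmd_id field name and the bpmp handle are assumptions for illustration, and firmware that predates this addition is expected to answer CMD_EC_STATUS_EX_GET with -BPMP_EBADCMD.

static int ec_status_ex_get(struct tegra_bpmp *bpmp, uint32_t hsm_id,
			    struct cmd_ec_status_ex_get_response *status)
{
	struct mrq_ec_request req = {
		.cmd_id = CMD_EC_STATUS_EX_GET,
		.ec_status_get = { .ec_hsm_id = hsm_id },
	};
	struct mrq_ec_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_EC,
		.tx = { .data = &req, .size = sizeof(req) },
		.rx = { .data = &resp, .size = sizeof(resp) },
	};
	int err = tegra_bpmp_transfer(bpmp, &msg);

	if (err < 0)
		return err;
	if (msg.rx.ret < 0)
		return msg.rx.ret;	/* -BPMP_EBADCMD if unsupported */

	*status = resp.ec_status_ex_get;
	if (status->ec_status_flags & EC_STATUS_FLAG_NO_ERROR)
		return 0;	/* no error latched; error_* fields undefined */

	return status->error_desc_num;
}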
@@ -2668,6 +3013,8 @@ struct mrq_fbvolt_status_response {
* @{
*/
+/** @brief Operation not permitted */
+#define BPMP_EPERM 1
/** @brief No such file or directory */
#define BPMP_ENOENT 2
/** @brief No MRQ handler */
@@ -2676,12 +3023,16 @@ struct mrq_fbvolt_status_response {
#define BPMP_EIO 5
/** @brief Bad sub-MRQ command */
#define BPMP_EBADCMD 6
+/** @brief Resource temporarily unavailable */
+#define BPMP_EAGAIN 11
/** @brief Not enough memory */
#define BPMP_ENOMEM 12
/** @brief Permission denied */
#define BPMP_EACCES 13
/** @brief Bad address */
#define BPMP_EFAULT 14
+/** @brief Resource busy */
+#define BPMP_EBUSY 16
/** @brief No such device */
#define BPMP_ENODEV 19
/** @brief Argument is a directory */
@@ -2693,10 +3044,18 @@ struct mrq_fbvolt_status_response {
/** @brief Out of range */
#define BPMP_ERANGE 34
/** @brief Function not implemented */
-#define BPMP_ENOSYS 38
+#define BPMP_ENOSYS 38
/** @brief Invalid slot */
#define BPMP_EBADSLT 57
+/** @brief Not supported */
+#define BPMP_ENOTSUP 134
+/** @brief No such device or address */
+#define BPMP_ENXIO 140
/** @} */
+#if defined(BPMP_ABI_CHECKS)
+#include "bpmp_abi_checks.h"
+#endif
+
#endif
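Note that the two newly added codes do not line up numerically with the kernel's errno values (BPMP_ENOTSUP is 134 and BPMP_ENXIO is 140, whereas Linux uses 95 for EOPNOTSUPP and 6 for ENXIO), so callers cannot simply negate mrq_response::err across the board. A hypothetical translation helper, not something this patch or the bpmp driver provides:

#include <linux/errno.h>

static int bpmp_err_to_errno(int err)
{
	switch (-err) {	/* mrq_response::err carries negated BPMP_E* codes */
	case 0:
		return 0;
	case BPMP_EPERM:
		return -EPERM;
	case BPMP_EAGAIN:
		return -EAGAIN;
	case BPMP_EBUSY:
		return -EBUSY;
	case BPMP_ENOTSUP:
		return -EOPNOTSUPP;
	case BPMP_ENXIO:
		return -ENXIO;
	default:
		return -EINVAL;	/* assumed catch-all for unmapped codes */
	}
}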
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 252ea20fe4c1..1097feca41ed 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -12,6 +12,8 @@
#define TEGRA124 0x40
#define TEGRA132 0x13
#define TEGRA210 0x21
+#define TEGRA186 0x18
+#define TEGRA194 0x19
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
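A hedged usage sketch for the two new chip ids (not from the patch): tegra_get_chip_id() is declared in this same header and returns the value compared against these defines; the calling context is illustrative.

#include <soc/tegra/fuse.h>

static const char *tegra_chip_name(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA186:
		return "Tegra186";
	case TEGRA194:
		return "Tegra194";
	default:
		return "unknown";
	}
}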
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f076c430d243..f3a4b4d60714 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -35,7 +35,7 @@ TRACE_EVENT(scmi_xfer_begin,
TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
- u32 status),
+ int status),
TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
TP_STRUCT__entry(
@@ -43,7 +43,7 @@ TRACE_EVENT(scmi_xfer_end,
__field(u8, msg_id)
__field(u8, protocol_id)
__field(u16, seq)
- __field(u32, status)
+ __field(int, status)
),
TP_fast_assign(
@@ -54,7 +54,7 @@ TRACE_EVENT(scmi_xfer_end,
__entry->status = status;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%u",
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d",
__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
__entry->seq, __entry->status)
);
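The scmi.h hunk fixes a signedness bug in the trace output: SCMI transfers complete with an int status that is negative on failure, so storing it as u32 and printing with %u rendered, for example, -ETIMEDOUT (-110) as 4294967186. An illustrative call site (the xfer field names are assumptions drawn from the SCMI driver, not from this hunk):

/* a timed-out transfer now traces as "status=-110" rather than a
 * wrapped unsigned value */
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
		    xfer->hdr.protocol_id, xfer->hdr.seq, -ETIMEDOUT);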