author     Linus Torvalds  2022-08-02 08:10:10 -0700
committer  Linus Torvalds  2022-08-02 08:10:10 -0700
commit     47b62edcd4eb70ccf4ecfacaf1a960eecfd3f891 (patch)
tree       2234952a6466a991d3c336bb85ff62c529847ebf
parent     e233cc59d08f9bd389d4e8eaf71063150110b9b9 (diff)
parent     99978d2fd90b9a43d187dae7b0f3266ccd980c5c (diff)
Merge tag 'arm-drivers-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC drivers from Arnd Bergmann:
 "The SoC driver updates contain changes to improve support for
  additional SoC variants, as well as cleanups and minor bugfixes in a
  number of existing drivers. Notable updates this time include:

   - Support for Qualcomm MSM8909 (Snapdragon 210) in various drivers

   - Updates for interconnect drivers on Qualcomm Snapdragon

   - A new driver for NMI interrupts on the Fujitsu A64FX

   - A rework of Broadcom BCMBCA Kconfig dependencies

   - Improved support for BCM2711 (Raspberry Pi 4) power management to
     allow the use of the V3D GPU

   - Cleanups to the NXP guts driver

   - Arm SCMI firmware driver updates to add tracing support, and use
     the firmware interfaces for system power control and for power
     capping"

* tag 'arm-drivers-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (125 commits)
  soc: a64fx-diag: disable modular build
  dt-bindings: soc: qcom: qcom,smd-rpm: add power-controller
  dt-bindings: soc: qcom: aoss: document qcom,sm8450-aoss-qmp
  dt-bindings: soc: qcom,rpmh-rsc: simplify qcom,tcs-config
  ARM: mach-qcom: Add support for MSM8909
  dt-bindings: arm: cpus: Document "qcom,msm8909-smp" enable-method
  soc: qcom: spm: Add CPU data for MSM8909
  dt-bindings: soc: qcom: spm: Add MSM8909 CPU compatible
  soc: qcom: rpmpd: Add compatible for MSM8909
  dt-bindings: power: qcom-rpmpd: Add MSM8909 power domains
  soc: qcom: smd-rpm: Add compatible for MSM8909
  dt-bindings: soc: qcom: smd-rpm: Add MSM8909
  soc: qcom: icc-bwmon: Remove unnecessary print function dev_err()
  soc: fujitsu: Add A64FX diagnostic interrupt driver
  soc: qcom: socinfo: Fix the id of SA8540P SoC
  soc: qcom: Make QCOM_RPMPD depend on PM
  tty: serial: bcm63xx: bcmbca: Replace ARCH_BCM_63XX with ARCH_BCMBCA
  spi: bcm63xx-hsspi: bcmbca: Replace ARCH_BCM_63XX with ARCH_BCMBCA
  clk: bcm: bcmbca: Replace ARCH_BCM_63XX with ARCH_BCMBCA
  hwrng: bcm2835: bcmbca: Replace ARCH_BCM_63XX with ARCH_BCMBCA
  ...
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/arm/qcom.yaml  2
-rw-r--r--  Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-axi2apb.yaml  40
-rw-r--r--  Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml  97
-rw-r--r--  Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml  74
-rw-r--r--  Documentation/devicetree/bindings/firmware/arm,scmi.yaml  10
-rw-r--r--  Documentation/devicetree/bindings/firmware/qcom,scm.txt  4
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml  86
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/power/mediatek,power-controller.yaml  2
-rw-r--r--  Documentation/devicetree/bindings/power/qcom,rpmpd.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/soc/mediatek/devapc.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml (renamed from Documentation/devicetree/bindings/display/mediatek/mediatek,mutex.yaml)  14
-rw-r--r--  Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml  91
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml  33
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml  5
-rw-r--r--  MAINTAINERS  12
-rw-r--r--  arch/arm/mach-qcom/Kconfig  4
-rw-r--r--  arch/arm/mach-qcom/platsmp.c  1
-rw-r--r--  drivers/ata/Kconfig  2
-rw-r--r--  drivers/char/hw_random/Kconfig  2
-rw-r--r--  drivers/clk/bcm/Kconfig  4
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig  12
-rw-r--r--  drivers/firmware/arm_scmi/Makefile  3
-rw-r--r--  drivers/firmware/arm_scmi/driver.c  281
-rw-r--r--  drivers/firmware/arm_scmi/perf.c  225
-rw-r--r--  drivers/firmware/arm_scmi/powercap.c  866
-rw-r--r--  drivers/firmware/arm_scmi/protocols.h  23
-rw-r--r--  drivers/firmware/arm_scmi/scmi_power_control.c  362
-rw-r--r--  drivers/firmware/arm_scmi/system.c  17
-rw-r--r--  drivers/firmware/arm_scpi.c  61
-rw-r--r--  drivers/firmware/qcom_scm-legacy.c  4
-rw-r--r--  drivers/firmware/qcom_scm.c  71
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c  10
-rw-r--r--  drivers/firmware/tegra/bpmp.c  6
-rw-r--r--  drivers/i2c/busses/Kconfig  2
-rw-r--r--  drivers/memory/mtk-smi.c  17
-rw-r--r--  drivers/memory/tegra/tegra234.c  80
-rw-r--r--  drivers/mfd/bcm2835-pm.c  74
-rw-r--r--  drivers/phy/broadcom/Kconfig  2
-rw-r--r--  drivers/soc/Kconfig  1
-rw-r--r--  drivers/soc/Makefile  1
-rw-r--r--  drivers/soc/amlogic/meson-mx-socinfo.c  1
-rw-r--r--  drivers/soc/amlogic/meson-secure-pwrc.c  4
-rw-r--r--  drivers/soc/bcm/bcm2835-power.c  72
-rw-r--r--  drivers/soc/bcm/brcmstb/biuctrl.c  9
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/pm-arm.c  2
-rw-r--r--  drivers/soc/fsl/guts.c  221
-rw-r--r--  drivers/soc/fujitsu/Kconfig  16
-rw-r--r--  drivers/soc/fujitsu/Makefile  3
-rw-r--r--  drivers/soc/fujitsu/a64fx-diag.c  154
-rw-r--r--  drivers/soc/imx/gpcv2.c  8
-rw-r--r--  drivers/soc/imx/imx8m-blk-ctrl.c  9
-rw-r--r--  drivers/soc/mediatek/Kconfig  10
-rw-r--r--  drivers/soc/mediatek/Makefile  1
-rw-r--r--  drivers/soc/mediatek/mt6795-pm-domains.h  112
-rw-r--r--  drivers/soc/mediatek/mt8183-pm-domains.h  1
-rw-r--r--  drivers/soc/mediatek/mt8186-pm-domains.h  2
-rw-r--r--  drivers/soc/mediatek/mt8192-pm-domains.h  2
-rw-r--r--  drivers/soc/mediatek/mt8195-pm-domains.h  4
-rw-r--r--  drivers/soc/mediatek/mt8365-mmsys.h  22
-rw-r--r--  drivers/soc/mediatek/mtk-devapc.c  45
-rw-r--r--  drivers/soc/mediatek/mtk-mutex.c  155
-rw-r--r--  drivers/soc/mediatek/mtk-pm-domains.c  8
-rw-r--r--  drivers/soc/mediatek/mtk-pm-domains.h  2
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c  225
-rw-r--r--  drivers/soc/mediatek/mtk-svs.c  2403
-rw-r--r--  drivers/soc/qcom/Kconfig  18
-rw-r--r--  drivers/soc/qcom/Makefile  1
-rw-r--r--  drivers/soc/qcom/apr.c  15
-rw-r--r--  drivers/soc/qcom/cmd-db.c  8
-rw-r--r--  drivers/soc/qcom/icc-bwmon.c  419
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c  2
-rw-r--r--  drivers/soc/qcom/mdt_loader.c  4
-rw-r--r--  drivers/soc/qcom/ocmem.c  3
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c  4
-rw-r--r--  drivers/soc/qcom/rpmhpd.c  4
-rw-r--r--  drivers/soc/qcom/rpmpd.c  1
-rw-r--r--  drivers/soc/qcom/smd-rpm.c  1
-rw-r--r--  drivers/soc/qcom/smp2p.c  3
-rw-r--r--  drivers/soc/qcom/socinfo.c  4
-rw-r--r--  drivers/soc/qcom/spm.c  14
-rw-r--r--  drivers/soc/renesas/r8a779a0-sysc.c  10
-rw-r--r--  drivers/soc/renesas/rcar-gen4-sysc.h  4
-rw-r--r--  drivers/soc/renesas/rcar-sysc.h  4
-rw-r--r--  drivers/soc/sunxi/Kconfig  1
-rw-r--r--  drivers/soc/ti/pruss.c  1
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c  2
-rw-r--r--  drivers/spi/Kconfig  2
-rw-r--r--  drivers/tty/serial/Kconfig  4
-rw-r--r--  include/dt-bindings/clock/tegra234-clock.h  101
-rw-r--r--  include/dt-bindings/memory/tegra234-mc.h  21
-rw-r--r--  include/dt-bindings/power/mt6795-power.h  16
-rw-r--r--  include/dt-bindings/power/qcom-rpmpd.h  7
-rw-r--r--  include/dt-bindings/power/tegra234-powergate.h  1
-rw-r--r--  include/dt-bindings/reset/tegra234-reset.h  9
-rw-r--r--  include/linux/mfd/bcm2835-pm.h  1
-rw-r--r--  include/linux/scmi_protocol.h  134
-rw-r--r--  include/linux/soc/mediatek/mtk-mutex.h  27
-rw-r--r--  include/trace/events/scmi.h  56
105 files changed, 6418 insertions, 588 deletions
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index ed04650291a8..5c2e3a5f3789 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -221,6 +221,7 @@ properties:
- qcom,kpss-acc-v1
- qcom,kpss-acc-v2
- qcom,msm8226-smp
+ - qcom,msm8909-smp
# Only valid on ARM 32-bit, see above for ARM v8 64-bit
- qcom,msm8916-smp
- renesas,apmu
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index 5c06d1bfc046..6208558e1036 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: QCOM device tree bindings
maintainers:
- - Stephen Boyd <sboyd@codeaurora.org>
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
description: |
Some qcom based bootloaders identify the dtb blob based on a set of
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-axi2apb.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-axi2apb.yaml
new file mode 100644
index 000000000000..788a13f8aa93
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-axi2apb.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra194-axi2apb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: NVIDIA Tegra194 AXI2APB bridge
+
+maintainers:
+ - Sumit Gupta <sumitg@nvidia.com>
+
+properties:
+ $nodename:
+ pattern: "^axi2apb@([0-9a-f]+)$"
+
+ compatible:
+ enum:
+ - nvidia,tegra194-axi2apb
+
+ reg:
+ maxItems: 6
+ description: Physical base address and length of registers for all bridges
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ axi2apb: axi2apb@2390000 {
+ compatible = "nvidia,tegra194-axi2apb";
+ reg = <0x02390000 0x1000>,
+ <0x023a0000 0x1000>,
+ <0x023b0000 0x1000>,
+ <0x023c0000 0x1000>,
+ <0x023d0000 0x1000>,
+ <0x023e0000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml
new file mode 100644
index 000000000000..debb2b0c8013
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra194-cbb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: NVIDIA Tegra194 CBB 1.0 bindings
+
+maintainers:
+ - Sumit Gupta <sumitg@nvidia.com>
+
+description: |+
+ The Control Backbone (CBB) is comprised of the physical path from an
+ initiator to a target's register configuration space. CBB 1.0 has
+ multiple hierarchical sub-NOCs (Network-on-Chip) and connects various
+ initiators and targets using different bridges like AXIP2P, AXI2APB.
+
+ This driver handles errors due to illegal register accesses reported
+ by the NOCs inside the CBB. NOCs reporting errors are cluster NOCs
+ "AON-NOC, SCE-NOC, RCE-NOC, BPMP-NOC, CV-NOC" and "CBB Central NOC"
+ which is the main NOC.
+
+ By default, the access-issuing initiator is informed about the error
+ by an SError or Data Abort exception, unless ERD (Error Response
+ Disable) is set for that initiator. If ERD is set, the SError or Data
+ Abort is masked and the error is reported with an interrupt instead.
+
+ - For the CCPLEX (CPU Complex) initiator, the driver sets the ERD bit, so
+ errors due to illegal accesses from the CCPLEX are reported by interrupts.
+ If ERD is not set, the error is reported by an SError.
+ - For other initiators, ERD is disabled, so the access-issuing initiator
+ is informed about the illegal access by a Data Abort exception. In
+ addition, an interrupt is also generated to the CCPLEX. These initiators
+ include all engines using a Cortex-R5 (an ARMv7 CPU cluster) and engines
+ such as TSEC (security co-processor) and NVDEC (NVIDIA video decoder
+ engine) which can initiate transactions.
+
+ On receiving an error notification, the driver prints relevant debug
+ information such as Error Code, Error Description, Master, Address,
+ AXI ID, Cache, Protection and Security Group.
+
+properties:
+ $nodename:
+ pattern: "^[a-z]+-noc@[0-9a-f]+$"
+
+ compatible:
+ enum:
+ - nvidia,tegra194-cbb-noc
+ - nvidia,tegra194-aon-noc
+ - nvidia,tegra194-bpmp-noc
+ - nvidia,tegra194-rce-noc
+ - nvidia,tegra194-sce-noc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ CCPLEX receives secure or nonsecure interrupt depending on error type.
+ A secure interrupt is received for SEC(firewall) & SLV errors and a
+ non-secure interrupt is received for TMO & DEC errors.
+ items:
+ - description: non-secure interrupt
+ - description: secure interrupt
+
+ nvidia,axi2apb:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description:
+ Specifies the node having all axi2apb bridges which need to be checked
+ for any error logged in their status register.
+
+ nvidia,apbmisc:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description:
+ Specifies the apbmisc node which needs to be used for reading the ERD
+ register.
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - nvidia,apbmisc
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ cbb-noc@2300000 {
+ compatible = "nvidia,tegra194-cbb-noc";
+ reg = <0x02300000 0x1000>;
+ interrupts = <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
+ nvidia,axi2apb = <&axi2apb>;
+ nvidia,apbmisc = <&apbmisc>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml
new file mode 100644
index 000000000000..7b1fe50ffbe0
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra234-cbb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: NVIDIA Tegra CBB 2.0 bindings
+
+maintainers:
+ - Sumit Gupta <sumitg@nvidia.com>
+
+description: |+
+ The Control Backbone (CBB) is comprised of the physical path from an
+ initiator to a target's register configuration space. CBB 2.0 consists
+ of multiple sub-blocks connected to each other to create a topology.
+ The Tegra234 SoC has different fabrics based on CBB 2.0 architecture
+ which include cluster fabrics BPMP, AON, PSC, SCE, RCE, DCE, FSI and
+ "CBB central fabric".
+
+ In CBB 2.0, each initiator which can issue transactions connects to a
+ Root Master Node (MN) before it connects to any other element of the
+ fabric. Each Root MN contains an Error Monitor (EM) which detects and
+ logs errors. Interrupts from the various EM blocks are collated by an
+ Error Notifier (EN), one per fabric, which presents a single interrupt
+ from the fabric to the SoC interrupt controller.
+
+ The driver handles errors from the CBB due to illegal register accesses
+ and prints debug information about the failed transaction on receiving
+ the interrupt from the EN. The debug information includes Error Code,
+ Error Description, MasterID, Fabric, SlaveID, Address, Cache, Protection
+ and Security Group.
+
+ If the Error Response Disable (ERD) is set/enabled for an initiator,
+ then SError or Data abort exception error response is masked and an
+ interrupt is used for reporting errors due to illegal accesses from
+ that initiator. The value returned on read failures is '0xFFFFFFFF'
+ for compatibility with PCIE.
+
+properties:
+ $nodename:
+ pattern: "^[a-z]+-fabric@[0-9a-f]+$"
+
+ compatible:
+ enum:
+ - nvidia,tegra234-aon-fabric
+ - nvidia,tegra234-bpmp-fabric
+ - nvidia,tegra234-cbb-fabric
+ - nvidia,tegra234-dce-fabric
+ - nvidia,tegra234-rce-fabric
+ - nvidia,tegra234-sce-fabric
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: secure interrupt from error notifier
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ cbb-fabric@1300000 {
+ compatible = "nvidia,tegra234-cbb-fabric";
+ reg = <0x13a00000 0x400000>;
+ interrupts = <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
index 948e2a38beed..1c0388da6721 100644
--- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
+++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
@@ -183,6 +183,12 @@ properties:
required:
- reg
+ protocol@18:
+ type: object
+ properties:
+ reg:
+ const: 0x18
+
additionalProperties: false
patternProperties:
@@ -323,6 +329,10 @@ examples:
};
};
};
+
+ scmi_powercap: protocol@18 {
+ reg = <0x18>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 0f4e5ab26477..b3f702cbed87 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -23,10 +23,13 @@ Required properties:
* "qcom,scm-msm8994"
* "qcom,scm-msm8996"
* "qcom,scm-msm8998"
+ * "qcom,scm-qcs404"
* "qcom,scm-sc7180"
* "qcom,scm-sc7280"
+ * "qcom,scm-sm6125"
* "qcom,scm-sdm845"
* "qcom,scm-sdx55"
+ * "qcom,scm-sdx65"
* "qcom,scm-sm6350"
* "qcom,scm-sm8150"
* "qcom,scm-sm8250"
@@ -43,6 +46,7 @@ Required properties:
clock and "bus" for the bus clock per the requirements of the compatible.
- qcom,dload-mode: phandle to the TCSR hardware block and offset of the
download mode control register (optional)
+- interconnects: Specifies the bandwidth requirements of the SCM interface (optional)
Example for MSM8916:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
new file mode 100644
index 000000000000..c2e697f6e6cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,msm8998-bwmon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Interconnect Bandwidth Monitor
+
+maintainers:
+ - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+description: |
+ Bandwidth Monitor measures current throughput on buses between various NoC
+ fabrics and provides information when it crosses configured thresholds.
+
+ Certain SoCs might have more than one Bandwidth Monitor, for example on SDM845:
+ - Measuring the bandwidth between CPUs and Last Level Cache Controller -
+ called just BWMON,
+ - Measuring the bandwidth between Last Level Cache Controller and memory
+ (DDR) - called LLCC BWMON.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - qcom,sdm845-bwmon
+ - const: qcom,msm8998-bwmon
+ - const: qcom,msm8998-bwmon # BWMON v4
+
+ interconnects:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ operating-points-v2: true
+ opp-table: true
+
+ reg:
+ # BWMON v4 (currently described) and BWMON v5 use one register address
+ # space. BWMON v2 uses two register spaces - not yet described.
+ maxItems: 1
+
+required:
+ - compatible
+ - interconnects
+ - interrupts
+ - operating-points-v2
+ - opp-table
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interconnect/qcom,sdm845.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pmu@1436400 {
+ compatible = "qcom,sdm845-bwmon", "qcom,msm8998-bwmon";
+ reg = <0x01436400 0x600>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_LLCC 3>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+
+ cpu_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-0 {
+ opp-peak-kBps = <4800000>;
+ };
+ opp-1 {
+ opp-peak-kBps = <9216000>;
+ };
+ opp-2 {
+ opp-peak-kBps = <15052800>;
+ };
+ opp-3 {
+ opp-peak-kBps = <20889600>;
+ };
+ opp-4 {
+ opp-peak-kBps = <25497600>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
index a98b359bf909..71bc5cefb49c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
@@ -32,6 +32,7 @@ properties:
- mediatek,mt2701-smi-common
- mediatek,mt2712-smi-common
- mediatek,mt6779-smi-common
+ - mediatek,mt6795-smi-common
- mediatek,mt8167-smi-common
- mediatek,mt8173-smi-common
- mediatek,mt8183-smi-common
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
index c886681f62a7..59dcd163668f 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
@@ -20,6 +20,7 @@ properties:
- mediatek,mt2701-smi-larb
- mediatek,mt2712-smi-larb
- mediatek,mt6779-smi-larb
+ - mediatek,mt6795-smi-larb
- mediatek,mt8167-smi-larb
- mediatek,mt8173-smi-larb
- mediatek,mt8183-smi-larb
diff --git a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
index 135c6f722091..b448101fac43 100644
--- a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
+++ b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
@@ -23,6 +23,7 @@ properties:
compatible:
enum:
+ - mediatek,mt6795-power-controller
- mediatek,mt8167-power-controller
- mediatek,mt8173-power-controller
- mediatek,mt8183-power-controller
@@ -62,6 +63,7 @@ patternProperties:
reg:
description: |
Power domain index. Valid values are defined in:
+ "include/dt-bindings/power/mt6795-power.h" - for MT8167 type power domain.
"include/dt-bindings/power/mt8167-power.h" - for MT8167 type power domain.
"include/dt-bindings/power/mt8173-power.h" - for MT8173 type power domain.
"include/dt-bindings/power/mt8183-power.h" - for MT8183 type power domain.
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index ad77a6380f38..0ccca493251a 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -18,6 +18,7 @@ properties:
enum:
- qcom,mdm9607-rpmpd
- qcom,msm8226-rpmpd
+ - qcom,msm8909-rpmpd
- qcom,msm8916-rpmpd
- qcom,msm8939-rpmpd
- qcom,msm8953-rpmpd
diff --git a/Documentation/devicetree/bindings/soc/mediatek/devapc.yaml b/Documentation/devicetree/bindings/soc/mediatek/devapc.yaml
index 31e4d3c339bf..d0a4bc3b03e9 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/devapc.yaml
+++ b/Documentation/devicetree/bindings/soc/mediatek/devapc.yaml
@@ -20,6 +20,7 @@ properties:
compatible:
enum:
- mediatek,mt6779-devapc
+ - mediatek,mt8186-devapc
reg:
description: The base address of devapc register bank
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,mutex.yaml b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
index 3fdad71210b4..627dcc3e8b32 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,mutex.yaml
+++ b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/display/mediatek/mediatek,mutex.yaml#
+$id: http://devicetree.org/schemas/soc/mediatek/mediatek,mutex.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Mediatek mutex
@@ -55,6 +55,18 @@ properties:
include/dt-bindings/gce/<chip>-gce.h of each chips.
$ref: /schemas/types.yaml#/definitions/uint32-array
+ mediatek,gce-client-reg:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ items:
+ - description: phandle of GCE
+ - description: GCE subsys id
+ - description: register offset
+ - description: register size
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property. Each GCE subsys id maps
+ to a client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml b/Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml
new file mode 100644
index 000000000000..d911fa2d40ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/mediatek/mtk-svs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Smart Voltage Scaling (SVS) Device Tree Bindings
+
+maintainers:
+ - Roger Lu <roger.lu@mediatek.com>
+ - Matthias Brugger <matthias.bgg@gmail.com>
+ - Kevin Hilman <khilman@kernel.org>
+
+description: |+
+ The SVS engine is a piece of hardware with several controllers (banks)
+ that calculate suitable voltages for different power domains
+ (CPU/GPU/CCI) according to the chip process corner, temperature and
+ other factors. The DVFS driver can then apply the SVS bank voltage to
+ the PMIC/buck.
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt8183-svs
+ - mediatek,mt8192-svs
+
+ reg:
+ maxItems: 1
+ description: Address range of the MTK SVS controller.
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: Main clock for MTK SVS controller to work.
+
+ clock-names:
+ const: main
+
+ nvmem-cells:
+ minItems: 1
+ description:
+ Phandle to the calibration data provided by a nvmem device.
+ items:
+ - description: SVS efuse for SVS controller
+ - description: Thermal efuse for SVS controller
+
+ nvmem-cell-names:
+ items:
+ - const: svs-calibration-data
+ - const: t-calibration-data
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: svs_rst
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - nvmem-cells
+ - nvmem-cell-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8183-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ svs@1100b000 {
+ compatible = "mediatek,mt8183-svs";
+ reg = <0 0x1100b000 0 0x1000>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_THERM>;
+ clock-names = "main";
+ nvmem-cells = <&svs_calibration>, <&thermal_calibration>;
+ nvmem-cell-names = "svs-calibration-data", "t-calibration-data";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
index e2e173dfada7..d01e98768153 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
@@ -33,6 +33,7 @@ properties:
- qcom,sm8150-aoss-qmp
- qcom,sm8250-aoss-qmp
- qcom,sm8350-aoss-qmp
+ - qcom,sm8450-aoss-qmp
- const: qcom,aoss-qmp
reg:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
index f5ecf4a8c377..4a50f1d27724 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
@@ -65,33 +65,22 @@ properties:
qcom,tcs-config:
$ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 4
+ maxItems: 4
items:
- - items:
- - description: TCS type
- enum: [ 0, 1, 2, 3 ]
- - description: Number of TCS
- - items:
- - description: TCS type
- enum: [ 0, 1, 2, 3 ]
- - description: Number of TCS
- - items:
- - description: TCS type
- enum: [ 0, 1, 2, 3]
- - description: Numbe r of TCS
- - items:
- - description: TCS type
- enum: [ 0, 1, 2, 3 ]
- - description: Number of TCS
+ items:
+ - description: |
+ TCS type::
+ - ACTIVE_TCS
+ - SLEEP_TCS
+ - WAKE_TCS
+ - CONTROL_TCS
+ enum: [ 0, 1, 2, 3 ]
+ - description: Number of TCS
description: |
The tuple defining the configuration of TCS. Must have two cells which
describe each TCS type. The order of the TCS must match the hardware
configuration.
- Cell 1 (TCS Type):: TCS types to be specified::
- - ACTIVE_TCS
- - SLEEP_TCS
- - WAKE_TCS
- - CONTROL_TCS
- Cell 2 (Number of TCS):: <u32>
qcom,tcs-offset:
$ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
index f0f1bf06aea6..50f834563e19 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -34,6 +34,7 @@ properties:
- qcom,rpm-apq8084
- qcom,rpm-ipq6018
- qcom,rpm-msm8226
+ - qcom,rpm-msm8909
- qcom,rpm-msm8916
- qcom,rpm-msm8936
- qcom,rpm-msm8953
@@ -51,6 +52,9 @@ properties:
$ref: /schemas/clock/qcom,rpmcc.yaml#
unevaluatedProperties: false
+ power-controller:
+ $ref: /schemas/power/qcom,rpmpd.yaml#
+
qcom,smd-channels:
$ref: /schemas/types.yaml#/definitions/string-array
description: Channel name used for the RPM communication
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml
index 07d2d5398345..f433e6e0a19f 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml
@@ -22,6 +22,7 @@ properties:
- qcom,sdm660-silver-saw2-v4.1-l2
- qcom,msm8998-gold-saw2-v4.1-l2
- qcom,msm8998-silver-saw2-v4.1-l2
+ - qcom,msm8909-saw2-v3.0-cpu
- qcom,msm8916-saw2-v3.0-cpu
- qcom,msm8226-saw2-v2.1-cpu
- qcom,msm8974-saw2-v2.1-cpu
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
index d891ecfb2691..5320504bb5e0 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
@@ -77,7 +77,6 @@ properties:
Should reference the tx-enable and tx-rings-empty SMEM states.
qcom,smem-state-names:
- $ref: /schemas/types.yaml#/definitions/string-array
items:
- const: tx-enable
- const: tx-rings-empty
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
index 64461d432004..847873289f25 100644
--- a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
@@ -65,10 +65,11 @@ properties:
- ti,am4376-pruss0 # for AM437x SoC family and PRUSS unit 0
- ti,am4376-pruss1 # for AM437x SoC family and PRUSS unit 1
- ti,am5728-pruss # for AM57xx SoC family
- - ti,k2g-pruss # for 66AK2G SoC family
+ - ti,am625-pruss # for K3 AM62x SoC family
+ - ti,am642-icssg # for K3 AM64x SoC family
- ti,am654-icssg # for K3 AM65x SoC family
- ti,j721e-icssg # for K3 J721E SoC family
- - ti,am642-icssg # for K3 AM64x SoC family
+ - ti,k2g-pruss # for 66AK2G SoC family
reg:
maxItems: 1
diff --git a/MAINTAINERS b/MAINTAINERS
index 2b8493bdc2cf..da9777c407d0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -242,6 +242,11 @@ F: include/trace/events/9p.h
F: include/uapi/linux/virtio_9p.h
F: net/9p/
+A64FX DIAG DRIVER
+M: Hitomi Hasegawa <hasegawa-hitomi@fujitsu.com>
+S: Supported
+F: drivers/soc/fujitsu/a64fx-diag.c
+
A8293 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -16674,6 +16679,13 @@ S: Maintained
F: Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
F: drivers/i2c/busses/i2c-qcom-cci.c
+QUALCOMM INTERCONNECT BWMON DRIVER
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
+F: drivers/soc/qcom/icc-bwmon.c
+
QUALCOMM IOMMU
M: Rob Clark <robdclark@gmail.com>
L: iommu@lists.linux.dev
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 109e126f7271..12a812e61c16 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -20,6 +20,10 @@ config ARCH_MSM8X60
bool "Enable support for MSM8X60"
select CLKSRC_QCOM
+config ARCH_MSM8909
+ bool "Enable support for MSM8909"
+ select HAVE_ARM_ARCH_TIMER
+
config ARCH_MSM8916
bool "Enable support for MSM8916"
select HAVE_ARM_ARCH_TIMER
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index 65a0d5ce2bb3..5d2f386a46d8 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -384,6 +384,7 @@ static const struct smp_operations qcom_smp_cortex_a7_ops __initconst = {
#endif
};
CPU_METHOD_OF_DECLARE(qcom_smp_msm8226, "qcom,msm8226-smp", &qcom_smp_cortex_a7_ops);
+CPU_METHOD_OF_DECLARE(qcom_smp_msm8909, "qcom,msm8909-smp", &qcom_smp_cortex_a7_ops);
CPU_METHOD_OF_DECLARE(qcom_smp_msm8916, "qcom,msm8916-smp", &qcom_smp_cortex_a7_ops);
static const struct smp_operations qcom_smp_kpssv1_ops __initconst = {
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index bb45a9c00514..1c9f4fb2595d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -148,7 +148,7 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
- ARCH_BCM_63XX || COMPILE_TEST
+ ARCH_BCMBCA || COMPILE_TEST
select SATA_HOST
help
This option enables support for the AHCI SATA3 controller found on
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b3f2d55dc551..3da8e85f8aae 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -87,7 +87,7 @@ config HW_RANDOM_BA431
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
- ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index ec738f74a026..77266afb1c79 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -22,9 +22,9 @@ config CLK_BCM2835
config CLK_BCM_63XX
bool "Broadcom BCM63xx clock support"
- depends on ARCH_BCM_63XX || COMPILE_TEST
+ depends on ARCH_BCMBCA || COMPILE_TEST
select COMMON_CLK_IPROC
- default ARCH_BCM_63XX
+ default ARCH_BCMBCA
help
Enable common clock framework support for Broadcom BCM63xx DSL SoCs
based on the ARM architecture
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 1e7b7fec97d9..a14f65444b35 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -149,4 +149,16 @@ config ARM_SCMI_POWER_DOMAIN
will be called scmi_pm_domain. Note this may be needed early in boot
before rootfs may be available.
+config ARM_SCMI_POWER_CONTROL
+ tristate "SCMI system power control driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ help
+ This enables System Power control logic which binds system shutdown or
+ reboot actions to SCMI System Power notifications generated by SCP
+ firmware.
+
+ This driver can also be built as a module. If so, the module will be
+ called scmi_power_control. Note this may be needed early in boot to catch
+ early shutdown/reboot SCMI requests.
+
endmenu
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 8d4afadda38c..9ea86f8cc8f7 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -7,11 +7,12 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
-scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8b7ac6663d57..609ebedee9cb 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -19,6 +19,7 @@
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
@@ -60,6 +61,11 @@ static atomic_t transfer_last_id;
static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);
+/* Track globally the creation of SCMI SystemPower related devices */
+static bool scmi_syspower_registered;
+/* Protect access to scmi_syspower_registered */
+static DEFINE_MUTEX(scmi_syspower_mtx);
+
struct scmi_requested_dev {
const struct scmi_device_id *id_table;
struct list_head node;
@@ -660,6 +666,11 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
+
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -694,6 +705,12 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
+ "DLYD" : "RESP",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.type);
@@ -827,6 +844,12 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* Trace polled replies. */
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ "RESP",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
}
} else {
/* And we wait for the response. */
@@ -903,6 +926,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->tx.buf, xfer->tx.len);
+
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
@@ -1259,10 +1286,174 @@ out:
return ret;
}
+struct scmi_msg_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+static void
+scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id, u32 valid_size,
+ u32 domain, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db = NULL;
+ struct scmi_msg_get_fc_info *info;
+ struct scmi_msg_resp_desc_fc *resp;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ if (!p_addr) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, describe_id,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ goto err_out;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ /*
+ * Bail out on error leaving fc_info addresses zeroed; this includes
+ * the case in which the requested domain/message_id does NOT support
+ * fastchannels at all.
+ */
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (size != valid_size) {
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_xfer;
+ }
+
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
+ if (!db) {
+ ret = -ENOMEM;
+ goto err_db;
+ }
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_db_mem;
+ }
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+
+ *p_db = db;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ dev_dbg(ph->dev,
+ "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
+ pi->proto->id, message_id, domain);
+
+ return;
+
+err_db_mem:
+ devm_kfree(ph->dev, db);
+
+err_db:
+ *p_addr = NULL;
+
+err_xfer:
+ ph->xops->xfer_put(ph, t);
+
+err_out:
+ dev_warn(ph->dev,
+ "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
+ pi->proto->id, message_id, domain, ret);
+}
+
+#define SCMI_PROTO_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
+
+static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PROTO_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PROTO_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PROTO_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PROTO_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set | val, db->addr);
+ }
+#endif
+}
+
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
+ .fastchannel_init = scmi_common_fastchannel_init,
+ .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};
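
/*
 * Editor's illustrative sketch (not part of this patch): how a protocol
 * implementation is expected to consume the common fastchannel helpers
 * exposed above through ph->hops. The names foo_domain_init_fc(),
 * foo_level_set_fast(), FOO_DESCRIBE_FASTCHANNEL and FOO_LEVEL_SET are
 * hypothetical placeholders; only scmi_fc_info, fastchannel_init() and
 * fastchannel_db_ring() come from this series (see the perf.c conversion
 * further below for the real user).
 */
static void foo_domain_init_fc(const struct scmi_protocol_handle *ph,
			       u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

	fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	/* Discover and ioremap a 4-byte wide set channel plus its doorbell. */
	ph->hops->fastchannel_init(ph, FOO_DESCRIBE_FASTCHANNEL,
				   FOO_LEVEL_SET, 4, domain,
				   &fc->set_addr, &fc->set_db);
	*p_fc = fc;
}

/* On the fast path, write the value and ring the doorbell (if any). */
static void foo_level_set_fast(const struct scmi_protocol_handle *ph,
			       struct scmi_fc_info *fc, u32 level)
{
	iowrite32(level, fc->set_addr);
	ph->hops->fastchannel_db_ring(fc->set_db);
}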
/**
@@ -1497,6 +1688,30 @@ static void scmi_devm_release_protocol(struct device *dev, void *res)
scmi_protocol_release(dres->handle, dres->protocol_id);
}
+static struct scmi_protocol_instance __must_check *
+scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_protocol_devres *dres;
+
+ dres = devres_alloc(scmi_devm_release_protocol,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return ERR_PTR(-ENOMEM);
+
+ pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
+ if (IS_ERR(pi)) {
+ devres_free(dres);
+ return pi;
+ }
+
+ dres->handle = sdev->handle;
+ dres->protocol_id = protocol_id;
+ devres_add(&sdev->dev, dres);
+
+ return pi;
+}
+
/**
* scmi_devm_protocol_get - Devres managed get protocol operations and handle
* @sdev: A reference to an scmi_device whose embedded struct device is to
@@ -1520,32 +1735,47 @@ scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
struct scmi_protocol_handle **ph)
{
struct scmi_protocol_instance *pi;
- struct scmi_protocol_devres *dres;
- struct scmi_handle *handle = sdev->handle;
if (!ph)
return ERR_PTR(-EINVAL);
- dres = devres_alloc(scmi_devm_release_protocol,
- sizeof(*dres), GFP_KERNEL);
- if (!dres)
- return ERR_PTR(-ENOMEM);
-
- pi = scmi_get_protocol_instance(handle, protocol_id);
- if (IS_ERR(pi)) {
- devres_free(dres);
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
return pi;
- }
-
- dres->handle = handle;
- dres->protocol_id = protocol_id;
- devres_add(&sdev->dev, dres);
*ph = &pi->ph;
return pi->proto->ops;
}
+/**
+ * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ *
+ * Get hold of a protocol accounting for its usage, possibly triggering its
+ * initialization but without getting access to its protocol specific operations
+ * and handle.
+ *
+ * Being a devres based managed method, protocol hold will be automatically
+ * released, and possibly de-initialized on last user, once the SCMI driver
+ * owning the scmi_device is unbound from it.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
+ u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
+ return PTR_ERR(pi);
+
+ return 0;
+}
+
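/*
 * Editor's illustrative sketch (not part of this patch): a hypothetical SCMI
 * driver probe that only needs the SystemPower protocol initialized and
 * reference-counted for the lifetime of its scmi_device, without accessing
 * any protocol-specific operations. Release happens through the devres hook
 * installed by scmi_devres_protocol_instance_get() above.
 */
static int foo_syspower_probe(struct scmi_device *sdev)
{
	int ret;

	/* Hold the protocol; it is dropped automatically on driver unbind. */
	ret = sdev->handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM);
	if (ret)
		return ret;

	/* ... register for SCMI SystemPower notifications here ... */

	return 0;
}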
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
struct scmi_protocol_devres *dres = res;
@@ -1849,21 +2079,39 @@ scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
if (sdev)
return sdev;
+ mutex_lock(&scmi_syspower_mtx);
+ if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
+ dev_warn(info->dev,
+ "SCMI SystemPower protocol device must be unique !\n");
+ mutex_unlock(&scmi_syspower_mtx);
+
+ return NULL;
+ }
+
pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
sdev = scmi_device_create(np, info->dev, prot_id, name);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
+ mutex_unlock(&scmi_syspower_mtx);
+
return NULL;
}
if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
+ mutex_unlock(&scmi_syspower_mtx);
+
return NULL;
}
+ if (prot_id == SCMI_PROTOCOL_SYSTEM)
+ scmi_syspower_registered = true;
+
+ mutex_unlock(&scmi_syspower_mtx);
+
return sdev;
}
@@ -2132,6 +2380,7 @@ static int scmi_probe(struct platform_device *pdev)
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
+ handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
@@ -2401,6 +2650,7 @@ static int __init scmi_driver_init(void)
scmi_sensors_register();
scmi_voltage_register();
scmi_system_register();
+ scmi_powercap_register();
return platform_driver_register(&scmi_driver);
}
@@ -2417,6 +2667,7 @@ static void __exit scmi_driver_exit(void)
scmi_sensors_unregister();
scmi_voltage_unregister();
scmi_system_unregister();
+ scmi_powercap_unregister();
scmi_bus_exit();
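
/*
 * Editor's illustrative sketch (not part of this patch): the new
 * scmi_powercap_register()/scmi_powercap_unregister() calls wired up in
 * scmi_driver_init()/scmi_driver_exit() above are assumed to be generated
 * in powercap.c by the same DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER()
 * helper used by the other protocols; the hook names below are hypothetical.
 */
static const struct scmi_protocol scmi_powercap = {
	.id = SCMI_PROTOCOL_POWERCAP,
	.owner = THIS_MODULE,
	.instance_init = &scmi_powercap_protocol_init,	/* assumed init hook */
	.ops = &powercap_proto_ops,			/* assumed ops table */
	.events = &scmi_powercap_protocol_events,	/* assumed events desc */
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)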
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index bbb0331801ff..64ea2d2f2875 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -10,13 +10,14 @@
#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
+#include <trace/events/scmi.h>
+
#include "protocols.h"
#include "notify.h"
@@ -35,6 +36,12 @@ enum scmi_performance_protocol_cmd {
PERF_DOMAIN_NAME_GET = 0xc,
};
+enum {
+ PERF_FC_LEVEL,
+ PERF_FC_LIMIT,
+ PERF_FC_MAX,
+};
+
struct scmi_opp {
u32 perf;
u32 power;
@@ -115,43 +122,6 @@ struct scmi_msg_resp_perf_describe_levels {
} opp[];
};
-struct scmi_perf_get_fc_info {
- __le32 domain;
- __le32 message_id;
-};
-
-struct scmi_msg_resp_perf_desc_fc {
- __le32 attr;
-#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
-#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
- __le32 rate_limit;
- __le32 chan_addr_low;
- __le32 chan_addr_high;
- __le32 chan_size;
- __le32 db_addr_low;
- __le32 db_addr_high;
- __le32 db_set_lmask;
- __le32 db_set_hmask;
- __le32 db_preserve_lmask;
- __le32 db_preserve_hmask;
-};
-
-struct scmi_fc_db_info {
- int width;
- u64 set;
- u64 mask;
- void __iomem *addr;
-};
-
-struct scmi_fc_info {
- void __iomem *level_set_addr;
- void __iomem *limit_set_addr;
- void __iomem *level_get_addr;
- void __iomem *limit_get_addr;
- struct scmi_fc_db_info *level_set_db;
- struct scmi_fc_db_info *limit_set_db;
-};
-
struct perf_dom_info {
bool set_limits;
bool set_perf;
@@ -360,40 +330,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
return ret;
}
-#define SCMI_PERF_FC_RING_DB(w) \
-do { \
- u##w val = 0; \
- \
- if (db->mask) \
- val = ioread##w(db->addr) & db->mask; \
- iowrite##w((u##w)db->set | val, db->addr); \
-} while (0)
-
-static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
-{
- if (!db || !db->addr)
- return;
-
- if (db->width == 1)
- SCMI_PERF_FC_RING_DB(8);
- else if (db->width == 2)
- SCMI_PERF_FC_RING_DB(16);
- else if (db->width == 4)
- SCMI_PERF_FC_RING_DB(32);
- else /* db->width == 8 */
-#ifdef CONFIG_64BIT
- SCMI_PERF_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
-}
-
static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 max_perf, u32 min_perf)
{
@@ -426,10 +362,14 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
return -EINVAL;
- if (dom->fc_info && dom->fc_info->limit_set_addr) {
- iowrite32(max_perf, dom->fc_info->limit_set_addr);
- iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
- scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
+ domain, min_perf, max_perf);
+ iowrite32(max_perf, fci->set_addr);
+ iowrite32(min_perf, fci->set_addr + 4);
+ ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
@@ -468,9 +408,13 @@ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->limit_get_addr) {
- *max_perf = ioread32(dom->fc_info->limit_get_addr);
- *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ *max_perf = ioread32(fci->get_addr);
+ *min_perf = ioread32(fci->get_addr + 4);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
+ domain, *min_perf, *max_perf);
return 0;
}
@@ -505,9 +449,13 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->level_set_addr) {
- iowrite32(level, dom->fc_info->level_set_addr);
- scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
+ domain, level, 0);
+ iowrite32(level, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
@@ -542,8 +490,10 @@ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->level_get_addr) {
- *level = ioread32(dom->fc_info->level_get_addr);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
+ *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
+ domain, *level, 0);
return 0;
}
@@ -572,100 +522,33 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
return ret;
}
-static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
-{
- if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
- return true;
- if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
- return true;
- return false;
-}
-
-static void
-scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
- u32 message_id, void __iomem **p_addr,
- struct scmi_fc_db_info **p_db)
-{
- int ret;
- u32 flags;
- u64 phys_addr;
- u8 size;
- void __iomem *addr;
- struct scmi_xfer *t;
- struct scmi_fc_db_info *db;
- struct scmi_perf_get_fc_info *info;
- struct scmi_msg_resp_perf_desc_fc *resp;
-
- if (!p_addr)
- return;
-
- ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
- sizeof(*info), sizeof(*resp), &t);
- if (ret)
- return;
-
- info = t->tx.buf;
- info->domain = cpu_to_le32(domain);
- info->message_id = cpu_to_le32(message_id);
-
- ret = ph->xops->do_xfer(ph, t);
- if (ret)
- goto err_xfer;
-
- resp = t->rx.buf;
- flags = le32_to_cpu(resp->attr);
- size = le32_to_cpu(resp->chan_size);
- if (!scmi_perf_fc_size_is_valid(message_id, size))
- goto err_xfer;
-
- phys_addr = le32_to_cpu(resp->chan_addr_low);
- phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
- addr = devm_ioremap(ph->dev, phys_addr, size);
- if (!addr)
- goto err_xfer;
- *p_addr = addr;
-
- if (p_db && SUPPORTS_DOORBELL(flags)) {
- db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- goto err_xfer;
-
- size = 1 << DOORBELL_REG_WIDTH(flags);
- phys_addr = le32_to_cpu(resp->db_addr_low);
- phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
- addr = devm_ioremap(ph->dev, phys_addr, size);
- if (!addr)
- goto err_xfer;
-
- db->addr = addr;
- db->width = size;
- db->set = le32_to_cpu(resp->db_set_lmask);
- db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
- db->mask = le32_to_cpu(resp->db_preserve_lmask);
- db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
- *p_db = db;
- }
-err_xfer:
- ph->xops->xfer_put(ph, t);
-}
-
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
- fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
+ fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
- scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
- &fc->level_set_addr, &fc->level_set_db);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
- &fc->level_get_addr, NULL);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
- &fc->limit_set_addr, &fc->limit_set_db);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
- &fc->limit_get_addr, NULL);
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_SET, 4, domain,
+ &fc[PERF_FC_LEVEL].set_addr,
+ &fc[PERF_FC_LEVEL].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_GET, 4, domain,
+ &fc[PERF_FC_LEVEL].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_SET, 8, domain,
+ &fc[PERF_FC_LIMIT].set_addr,
+ &fc[PERF_FC_LIMIT].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_GET, 8, domain,
+ &fc[PERF_FC_LIMIT].get_addr, NULL);
+
*p_fc = fc;
}
@@ -789,7 +672,7 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
dom = pi->dom_info + scmi_dev_domain_id(dev);
- return dom->fc_info && dom->fc_info->level_set_addr;
+ return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}
static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph)
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
new file mode 100644
index 000000000000..83b90bde755c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Powercap Protocol
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include <trace/events/scmi.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+enum scmi_powercap_protocol_cmd {
+ POWERCAP_DOMAIN_ATTRIBUTES = 0x3,
+ POWERCAP_CAP_GET = 0x4,
+ POWERCAP_CAP_SET = 0x5,
+ POWERCAP_PAI_GET = 0x6,
+ POWERCAP_PAI_SET = 0x7,
+ POWERCAP_DOMAIN_NAME_GET = 0x8,
+ POWERCAP_MEASUREMENTS_GET = 0x9,
+ POWERCAP_CAP_NOTIFY = 0xa,
+ POWERCAP_MEASUREMENTS_NOTIFY = 0xb,
+ POWERCAP_DESCRIBE_FASTCHANNEL = 0xc,
+};
+
+enum {
+ POWERCAP_FC_CAP,
+ POWERCAP_FC_PAI,
+ POWERCAP_FC_MAX,
+};
+
+struct scmi_msg_resp_powercap_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31))
+#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30))
+#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28))
+#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27))
+#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26))
+#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25))
+#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22))
+#define POWERCAP_POWER_UNIT(x) \
+ (FIELD_GET(GENMASK(24, 23), (x)))
+#define SUPPORTS_POWER_UNITS_MW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x2)
+#define SUPPORTS_POWER_UNITS_UW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x1)
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ __le32 min_pai;
+ __le32 max_pai;
+ __le32 pai_step;
+ __le32 min_power_cap;
+ __le32 max_power_cap;
+ __le32 power_cap_step;
+ __le32 sustainable_power;
+ __le32 accuracy;
+ __le32 parent_id;
+};
+
+struct scmi_msg_powercap_set_cap_or_pai {
+ __le32 domain;
+ __le32 flags;
+#define CAP_SET_ASYNC BIT(1)
+#define CAP_SET_IGNORE_DRESP BIT(0)
+ __le32 value;
+};
+
+struct scmi_msg_resp_powercap_cap_set_complete {
+ __le32 domain;
+ __le32 power_cap;
+};
+
+struct scmi_msg_resp_powercap_meas_get {
+ __le32 power;
+ __le32 pai;
+};
+
+struct scmi_msg_powercap_notify_cap {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_msg_powercap_notify_thresh {
+ __le32 domain;
+ __le32 notify_enable;
+ __le32 power_thresh_low;
+ __le32 power_thresh_high;
+};
+
+struct scmi_powercap_cap_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_cap;
+ __le32 pai;
+};
+
+struct scmi_powercap_meas_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power;
+};
+
+struct scmi_powercap_state {
+ bool meas_notif_enabled;
+ u64 thresholds;
+#define THRESH_LOW(p, id) \
+ (lower_32_bits((p)->states[(id)].thresholds))
+#define THRESH_HIGH(p, id) \
+ (upper_32_bits((p)->states[(id)].thresholds))
+};
+
+struct powercap_info {
+ u32 version;
+ int num_domains;
+ struct scmi_powercap_state *states;
+ struct scmi_powercap_info *powercaps;
+};
+
+static enum scmi_powercap_protocol_cmd evt_2_cmd[] = {
+ POWERCAP_CAP_NOTIFY,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+};
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable);
+
+static int
+scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u32 attributes;
+
+ attributes = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static inline int
+scmi_powercap_validate(unsigned int min_val, unsigned int max_val,
+ unsigned int step_val, bool configurable)
+{
+ if (!min_val || !max_val)
+ return -EPROTO;
+
+ if ((configurable && min_val == max_val) ||
+ (!configurable && min_val != max_val))
+ return -EPROTO;
+
+ if (min_val != max_val && !step_val)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pinfo, u32 domain)
+{
+ int ret;
+ u32 flags;
+ struct scmi_xfer *t;
+ struct scmi_powercap_info *dom_info = pinfo->powercaps + domain;
+ struct scmi_msg_resp_powercap_domain_attributes *resp;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ resp = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ flags = le32_to_cpu(resp->attributes);
+
+ dom_info->id = domain;
+ dom_info->notify_powercap_cap_change =
+ SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
+ dom_info->notify_powercap_measurement_change =
+ SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
+ dom_info->async_powercap_cap_set =
+ SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags);
+ dom_info->powercap_cap_config =
+ SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags);
+ dom_info->powercap_monitoring =
+ SUPPORTS_POWERCAP_MONITORING(flags);
+ dom_info->powercap_pai_config =
+ SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags);
+ dom_info->powercap_scale_mw =
+ SUPPORTS_POWER_UNITS_MW(flags);
+ dom_info->powercap_scale_uw =
+ SUPPORTS_POWER_UNITS_UW(flags);
+ dom_info->fastchannels =
+ SUPPORTS_POWERCAP_FASTCHANNELS(flags);
+
+ strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ dom_info->min_pai = le32_to_cpu(resp->min_pai);
+ dom_info->max_pai = le32_to_cpu(resp->max_pai);
+ dom_info->pai_step = le32_to_cpu(resp->pai_step);
+ ret = scmi_powercap_validate(dom_info->min_pai,
+ dom_info->max_pai,
+ dom_info->pai_step,
+ dom_info->powercap_pai_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent PAI config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap);
+ dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap);
+ dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step);
+ ret = scmi_powercap_validate(dom_info->min_power_cap,
+ dom_info->max_power_cap,
+ dom_info->power_cap_step,
+ dom_info->powercap_cap_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent CAP config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->sustainable_power =
+ le32_to_cpu(resp->sustainable_power);
+ dom_info->accuracy = le32_to_cpu(resp->accuracy);
+
+ dom_info->parent_id = le32_to_cpu(resp->parent_id);
+ if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID &&
+ (dom_info->parent_id >= pinfo->num_domains ||
+ dom_info->parent_id == dom_info->id)) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent parent ID for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ ret = -ENODEV;
+ }
+ }
+
+clean:
+ ph->xops->xfer_put(ph, t);
+
+ /*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error just carry on and use the already provided short name.
+ */
+ if (!ret && SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET,
+ domain, dom_info->name,
+ SCMI_MAX_STR_SIZE);
+
+ return ret;
+}
+
+static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static const struct scmi_powercap_info *
+scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains)
+ return NULL;
+
+ return pi->powercaps + domain_id;
+}
+
+static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *power_cap = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ struct scmi_powercap_info *dom;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!power_cap || domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ dom = pi->powercaps + domain_id;
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) {
+ *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET,
+ domain_id, *power_cap, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap);
+}
+
+static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
+ const struct scmi_powercap_info *pc,
+ u32 power_cap, bool ignore_dresp)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(pc->id);
+ msg->flags =
+ cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) |
+ FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp));
+ msg->value = cpu_to_le32(power_cap);
+
+ if (!pc->async_powercap_cap_set || ignore_dresp) {
+ ret = ph->xops->do_xfer(ph, t);
+ } else {
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_msg_resp_powercap_cap_set_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->domain) == pc->id)
+ dev_dbg(ph->dev,
+ "Powercap ID %d CAP set async to %u\n",
+ pc->id,
+ get_unaligned_le32(&resp->power_cap));
+ else
+ ret = -EPROTO;
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_cap,
+ bool ignore_dresp)
+{
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_cap_config || !power_cap ||
+ power_cap < pc->min_power_cap ||
+ power_cap > pc->max_power_cap)
+ return -EINVAL;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP];
+
+ iowrite32(power_cap, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET,
+ domain_id, power_cap, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp);
+}
+
+static int scmi_powercap_xfer_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *pai = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ struct scmi_powercap_info *dom;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pai || domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ dom = pi->powercaps + domain_id;
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) {
+ *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET,
+ domain_id, *pai, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_get(ph, domain_id, pai);
+}
+
+static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(domain_id);
+ msg->flags = cpu_to_le32(0);
+ msg->value = cpu_to_le32(pai);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_pai_config || !pai ||
+ pai < pc->min_pai || pai > pc->max_pai)
+ return -EINVAL;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET,
+ domain_id, pai, 0);
+ iowrite32(pai, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_set(ph, domain_id, pai);
+}
+
+static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power,
+ u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_powercap_meas_get *resp;
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_monitoring || !pai || !average_power)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET,
+ sizeof(u32), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ resp = t->rx.buf;
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ *average_power = le32_to_cpu(resp->power);
+ *pai = le32_to_cpu(resp->pai);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!power_thresh_low || !power_thresh_high ||
+ domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ *power_thresh_low = THRESH_LOW(pi, domain_id);
+ *power_thresh_high = THRESH_HIGH(pi, domain_id);
+
+ return 0;
+}
+
+static int
+scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high)
+{
+ int ret = 0;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains ||
+ power_thresh_low > power_thresh_high)
+ return -EINVAL;
+
+	/* Anything to do? */
+ if (THRESH_LOW(pi, domain_id) == power_thresh_low &&
+ THRESH_HIGH(pi, domain_id) == power_thresh_high)
+ return ret;
+
+ pi->states[domain_id].thresholds =
+ (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) |
+ FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high));
+
+ /* Update thresholds if notification already enabled */
+ if (pi->states[domain_id].meas_notif_enabled)
+ ret = scmi_powercap_notify(ph, domain_id,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+ true);
+
+ return ret;
+}
+
+static const struct scmi_powercap_proto_ops powercap_proto_ops = {
+ .num_domains_get = scmi_powercap_num_domains_get,
+ .info_get = scmi_powercap_dom_info_get,
+ .cap_get = scmi_powercap_cap_get,
+ .cap_set = scmi_powercap_cap_set,
+ .pai_get = scmi_powercap_pai_get,
+ .pai_set = scmi_powercap_pai_set,
+ .measurements_get = scmi_powercap_measurements_get,
+ .measurements_threshold_set = scmi_powercap_measurements_threshold_set,
+ .measurements_threshold_get = scmi_powercap_measurements_threshold_get,
+};
+
+static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_SET, 4, domain,
+ &fc[POWERCAP_FC_CAP].set_addr,
+ &fc[POWERCAP_FC_CAP].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_GET, 4, domain,
+ &fc[POWERCAP_FC_CAP].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_SET, 4, domain,
+ &fc[POWERCAP_FC_PAI].set_addr,
+ &fc[POWERCAP_FC_PAI].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_GET, 4, domain,
+ &fc[POWERCAP_FC_PAI].get_addr, NULL);
+
+ *p_fc = fc;
+}
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ switch (message_id) {
+ case POWERCAP_CAP_NOTIFY:
+ {
+ struct scmi_msg_powercap_notify_cap *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ break;
+ }
+ case POWERCAP_MEASUREMENTS_NOTIFY:
+ {
+ u32 low, high;
+ struct scmi_msg_powercap_notify_thresh *notify;
+
+ /*
+		 * Note that we have to pick the most recently configured
+		 * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY
+		 * enable request; we fail, with a complaint, if no thresholds
+		 * were ever set, since that indicates the API has been used
+		 * wrongly.
+ */
+ ret = scmi_powercap_measurements_threshold_get(ph, domain,
+ &low, &high);
+ if (ret)
+ return ret;
+
+ if (enable && !low && !high) {
+ dev_err(ph->dev,
+ "Invalid Measurements Notify thresholds: %u/%u\n",
+ low, high);
+ return -EINVAL;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ notify->power_thresh_low = cpu_to_le32(low);
+ notify->power_thresh_high = cpu_to_le32(high);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_powercap_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+ else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY)
+ /*
+		 * On success save the current notification enabled state, so
+		 * as to be able to properly update the notification thresholds
+		 * when they are modified on a domain for which measurement
+		 * notifications are currently enabled.
+ *
+ * This is needed because the SCMI Notification core machinery
+ * and API does not support passing per-notification custom
+ * arguments at callback registration time.
+ *
+ * Note that this can be done here with a simple flag since the
+ * SCMI core Notifications code takes care of keeping proper
+ * per-domain enables refcounting, so that this helper function
+		 * will be called only once (to enable) when the first user
+		 * registers a callback on this domain and once more (to
+		 * disable) when the last user de-registers its callback.
+ */
+ pi->states[src_id].meas_notif_enabled = enable;
+
+ return ret;
+}
+
+static void *
+scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_POWERCAP_CAP_CHANGED:
+ {
+ const struct scmi_powercap_cap_changed_notify_payld *p = payld;
+ struct scmi_powercap_cap_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_cap = le32_to_cpu(p->power_cap);
+ r->pai = le32_to_cpu(p->pai);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED:
+ {
+ const struct scmi_powercap_meas_changed_notify_payld *p = payld;
+ struct scmi_powercap_meas_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power = le32_to_cpu(p->power);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int
+scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
+static const struct scmi_event powercap_events[] = {
+ {
+ .id = SCMI_EVENT_POWERCAP_CAP_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_cap_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_cap_changed_report),
+ },
+ {
+ .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_meas_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_meas_changed_report),
+ },
+};
+
+static const struct scmi_event_ops powercap_event_ops = {
+ .get_num_sources = scmi_powercap_get_num_sources,
+ .set_notify_enabled = scmi_powercap_set_notify_enabled,
+ .fill_custom_report = scmi_powercap_fill_custom_report,
+};
+
+static const struct scmi_protocol_events powercap_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &powercap_event_ops,
+ .evts = powercap_events,
+ .num_events = ARRAY_SIZE(powercap_events),
+};
+
+static int
+scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct powercap_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Powercap Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_powercap_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->powercaps),
+ GFP_KERNEL);
+ if (!pinfo->powercaps)
+ return -ENOMEM;
+
+ /*
+ * Note that any failure in retrieving any domain attribute leads to
+ * the whole Powercap protocol initialization failure: this way the
+	 * reported Powercap domains are all assured, when accessed, to be
+	 * well-formed and correlated by a sane parent-child relationship
+	 * (if any).
+ */
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain);
+ if (ret)
+ return ret;
+
+ if (pinfo->powercaps[domain].fastchannels)
+ scmi_powercap_domain_init_fc(ph, domain,
+ &pinfo->powercaps[domain].fc_info);
+ }
+
+ pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->states), GFP_KERNEL);
+ if (!pinfo->states)
+ return -ENOMEM;
+
+ pinfo->version = version;
+
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_powercap = {
+ .id = SCMI_PROTOCOL_POWERCAP,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_powercap_protocol_init,
+ .ops = &powercap_proto_ops,
+ .events = &powercap_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)
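As a rough usage sketch (not part of the patch), an SCMI driver could consume the new Powercap operations roughly as follows; the devm_protocol_get() acquisition step is assumed from the existing SCMI driver model rather than from this series, and domain index 0 is purely illustrative.

static int example_powercap_probe(struct scmi_device *sdev)
{
	const struct scmi_powercap_proto_ops *powercap_ops;
	struct scmi_protocol_handle *ph;
	u32 cap;
	int ret;

	/* Acquire the Powercap protocol operations (assumed driver-model API). */
	powercap_ops = sdev->handle->devm_protocol_get(sdev,
						       SCMI_PROTOCOL_POWERCAP,
						       &ph);
	if (IS_ERR(powercap_ops))
		return PTR_ERR(powercap_ops);

	/* Read and then re-apply the current cap of domain 0, synchronously. */
	ret = powercap_ops->cap_get(ph, 0, &cap);
	if (ret)
		return ret;

	return powercap_ops->cap_set(ph, 0, cap, false);
}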
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 51c31379f9b3..2f3bf691db7c 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -215,6 +215,19 @@ struct scmi_iterator_ops {
struct scmi_iterator_state *st, void *priv);
};
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *set_addr;
+ void __iomem *get_addr;
+ struct scmi_fc_db_info *set_db;
+};
+
/**
* struct scmi_proto_helpers_ops - References to common protocol helpers
* @extended_name_get: A common helper function to retrieve extended naming
@@ -230,6 +243,9 @@ struct scmi_iterator_ops {
* provided in @ops.
* @iter_response_run: A common helper to trigger the run of a previously
* initialized iterator.
+ * @fastchannel_init: A common helper used to initialize FC descriptors by
+ * gathering FC descriptions from the SCMI platform server.
+ * @fastchannel_db_ring: A common helper to ring a FC doorbell.
*/
struct scmi_proto_helpers_ops {
int (*extended_name_get)(const struct scmi_protocol_handle *ph,
@@ -239,6 +255,12 @@ struct scmi_proto_helpers_ops {
unsigned int max_resources, u8 msg_id,
size_t tx_size, void *priv);
int (*iter_response_run)(void *iter);
+ void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id,
+ u32 valid_size, u32 domain,
+ void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db);
+ void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
};
/**
@@ -315,5 +337,6 @@ DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
+DECLARE_SCMI_REGISTER_UNREGISTER(powercap);
#endif /* _SCMI_PROTOCOLS_H */
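The fastchannel_db_ring() helper declared here lives in the SCMI core and is not shown in this excerpt; as a hedged sketch of the intended semantics, ringing a 32-bit doorbell described by scmi_fc_db_info amounts to preserving the masked bits and OR-ing in the set bits (other widths are handled analogously by the real helper).

/* Sketch only: what ringing a 32-bit doorbell is expected to do. */
static void example_db_ring_32(struct scmi_fc_db_info *db)
{
	u32 val;

	if (!db || !db->addr)
		return;

	val = ioread32(db->addr) & db->mask;	/* keep the preserved bits */
	iowrite32(val | db->set, db->addr);	/* raise the doorbell bits */
}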
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
new file mode 100644
index 000000000000..6eb7d2a4b6b1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic SystemPower Control driver.
+ *
+ * Copyright (C) 2020-2022 ARM Ltd.
+ */
+/*
+ * In order to handle platform-originated SCMI SystemPower requests (like
+ * shutdowns or cold/warm resets) we register an SCMI Notification notifier
+ * block to react when such SCMI SystemPower events are emitted by the
+ * platform.
+ *
+ * Once such a notification is received we act accordingly to perform the
+ * required system transition depending on the kind of request.
+ *
+ * Graceful requests are routed to userspace through the same API methods
+ * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus
+ * events.
+ *
+ * Direct forceful requests are not supported since they are not meant to be
+ * sent by the SCMI platform to an OSPM like Linux.
+ *
+ * Additionally, graceful request notifications can carry an optional timeout
+ * field stating the maximum amount of time allowed by the platform for
+ * completion, after which they are converted to forceful ones: the assumption
+ * here is that even graceful requests can be upper-bounded by a maximum final
+ * timeout strictly enforced by the platform itself, which can ultimately cut
+ * the power off at will at any time; to avoid such an extreme scenario, we
+ * track the progress of graceful requests by means of a reboot notifier,
+ * converting timed-out graceful requests to forceful ones, so that at least
+ * we try to perform a clean sync and shutdown/restart before the power is cut.
+ *
+ * Given the peculiar nature of the SCMI SystemPower protocol, which is in
+ * charge of triggering system-wide shutdown/reboot events, there should be
+ * only one SCMI platform actively emitting SystemPower events.
+ * For this reason the SCMI core takes care to enforce the creation of one
+ * single unique device associated with the SCMI System Power protocol; no
+ * matter how many SCMI platforms are defined on the system, only one can be
+ * designated to support System Power: as a consequence this driver will never
+ * be probed more than once.
+ *
+ * For similar reasons, as soon as the first valid SystemPower notification is
+ * received by this driver and the shutdown/reboot is started, any further
+ * notification possibly emitted by the platform will be ignored.
+ */
+
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifndef MODULE
+#include <linux/fs.h>
+#endif
+
+enum scmi_syspower_state {
+ SCMI_SYSPOWER_IDLE,
+ SCMI_SYSPOWER_IN_PROGRESS,
+ SCMI_SYSPOWER_REBOOTING
+};
+
+/**
+ * struct scmi_syspower_conf - Common configuration
+ *
+ * @dev: A reference device
+ * @state: Current SystemPower state
+ * @state_mtx: @state related mutex
+ * @required_transition: The requested transition as described in the received
+ * SCMI SystemPower notification
+ * @userspace_nb: The notifier_block registered against the SCMI SystemPower
+ * notification to start the needed userspace interactions.
+ * @reboot_nb: A notifier_block optionally used to track reboot progress
+ * @forceful_work: A worker used to trigger a forceful transition once a
+ *		   graceful one has timed out.
+ */
+struct scmi_syspower_conf {
+ struct device *dev;
+ enum scmi_syspower_state state;
+ /* Protect access to state */
+ struct mutex state_mtx;
+ enum scmi_system_events required_transition;
+
+ struct notifier_block userspace_nb;
+ struct notifier_block reboot_nb;
+
+ struct delayed_work forceful_work;
+};
+
+#define userspace_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, userspace_nb)
+
+#define reboot_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, reboot_nb)
+
+#define dwork_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, forceful_work)
+
+/**
+ * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful
+ * system transition
+ * @nb: Reference to the related notifier block
+ * @reason: The reason for the ongoing reboot
+ * @__unused: The cmd being executed on a restart request (unused)
+ *
+ * When an ongoing system transition compatible with the one requested by
+ * SCMI is detected, cancel the delayed work.
+ *
+ * Return: NOTIFY_OK in any case
+ */
+static int scmi_reboot_notifier(struct notifier_block *nb,
+ unsigned long reason, void *__unused)
+{
+ struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb);
+
+ mutex_lock(&sc->state_mtx);
+ switch (reason) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ case SYS_RESTART:
+ if (sc->required_transition == SCMI_SYSTEM_COLDRESET ||
+ sc->required_transition == SCMI_SYSTEM_WARMRESET)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ default:
+ break;
+ }
+
+ if (sc->state == SCMI_SYSPOWER_REBOOTING) {
+ dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n");
+ cancel_delayed_work_sync(&sc->forceful_work);
+ }
+ mutex_unlock(&sc->state_mtx);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * scmi_request_forceful_transition - Request forceful SystemPower transition
+ * @sc: A reference to the configuration data
+ *
+ * Initiates the required SystemPower transition without involving userspace:
+ * just trigger the action at the kernel level after issuing an emergency
+ * sync (if possible at all).
+ */
+static inline void
+scmi_request_forceful_transition(struct scmi_syspower_conf *sc)
+{
+ dev_dbg(sc->dev, "Serving forceful request:%d\n",
+ sc->required_transition);
+
+#ifndef MODULE
+ emergency_sync();
+#endif
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ kernel_power_off();
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ kernel_restart(NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static void scmi_forceful_work_func(struct work_struct *work)
+{
+ struct scmi_syspower_conf *sc;
+ struct delayed_work *dwork;
+
+ if (system_state > SYSTEM_RUNNING)
+ return;
+
+ dwork = to_delayed_work(work);
+ sc = dwork_to_sconf(dwork);
+
+ dev_dbg(sc->dev, "Graceful request timed out...forcing !\n");
+ mutex_lock(&sc->state_mtx);
+ /* avoid deadlock by unregistering reboot notifier first */
+ unregister_reboot_notifier(&sc->reboot_nb);
+ if (sc->state == SCMI_SYSPOWER_IN_PROGRESS)
+ scmi_request_forceful_transition(sc);
+ mutex_unlock(&sc->state_mtx);
+}
+
+/**
+ * scmi_request_graceful_transition - Request graceful SystemPower transition
+ * @sc: A reference to the configuration data
+ * @timeout_ms: The desired timeout to wait for the shutdown to complete before
+ *		the system is forcibly shut down.
+ *
+ * Initiates the required SystemPower transition, requesting userspace
+ * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event
+ * processing.
+ *
+ * It also takes care to register a reboot notifier and to schedule a delayed
+ * work item in order to detect whether userspace actions are taking too long
+ * and, in such a case, to trigger a forceful transition.
+ */
+static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc,
+ unsigned int timeout_ms)
+{
+ unsigned int adj_timeout_ms = 0;
+
+ if (timeout_ms) {
+ int ret;
+
+ sc->reboot_nb.notifier_call = &scmi_reboot_notifier;
+ ret = register_reboot_notifier(&sc->reboot_nb);
+ if (!ret) {
+ /* Wait only up to 75% of the advertised timeout */
+ adj_timeout_ms = mult_frac(timeout_ms, 3, 4);
+ INIT_DELAYED_WORK(&sc->forceful_work,
+ scmi_forceful_work_func);
+ schedule_delayed_work(&sc->forceful_work,
+ msecs_to_jiffies(adj_timeout_ms));
+ } else {
+ /* Carry on best effort even without a reboot notifier */
+ dev_warn(sc->dev,
+ "Cannot register reboot notifier !\n");
+ }
+ }
+
+ dev_dbg(sc->dev,
+ "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n",
+ sc->required_transition, timeout_ms, adj_timeout_ms);
+
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ /*
+ * When triggered early at boot-time the 'orderly' call will
+		 * partially fail due to the lack of userspace itself, but
+		 * the force=true argument will nevertheless start a successful
+		 * forced shutdown.
+ */
+ orderly_poweroff(true);
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ orderly_reboot();
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * scmi_userspace_notifier - Notifier callback to act on SystemPower
+ * Notifications
+ * @nb: Reference to the related notifier block
+ * @event: The SystemPower notification event id
+ * @data: The SystemPower event report
+ *
+ * This callback is in charge of decoding the received SystemPower report
+ * and acting accordingly, triggering a graceful or forceful system transition.
+ *
+ * Note that once a valid SCMI SystemPower event starts being served, any
+ * other following SystemPower notification received from the same SCMI
+ * instance (handle) will be ignored.
+ *
+ * Return: NOTIFY_OK once a valid SystemPower event has been successfully
+ * processed.
+ */
+static int scmi_userspace_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct scmi_system_power_state_notifier_report *er = data;
+ struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb);
+
+ if (er->system_state >= SCMI_SYSTEM_POWERUP) {
+ dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n",
+ er->system_state);
+ return NOTIFY_DONE;
+ }
+
+ if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) {
+ dev_err(sc->dev, "Ignoring forceful notification.\n");
+ return NOTIFY_DONE;
+ }
+
+ /*
+	 * Bail out if the system is already shutting down or an SCMI
+	 * SystemPower request is already being served.
+ */
+ if (system_state > SYSTEM_RUNNING)
+ return NOTIFY_DONE;
+ mutex_lock(&sc->state_mtx);
+ if (sc->state != SCMI_SYSPOWER_IDLE) {
+ dev_dbg(sc->dev,
+ "Transition already in progress...ignore.\n");
+ mutex_unlock(&sc->state_mtx);
+ return NOTIFY_DONE;
+ }
+ sc->state = SCMI_SYSPOWER_IN_PROGRESS;
+ mutex_unlock(&sc->state_mtx);
+
+ sc->required_transition = er->system_state;
+
+	/* Leave a trace in the logs of who triggered the shutdown/reboot. */
+ dev_info(sc->dev, "Serving shutdown/reboot request: %d\n",
+ sc->required_transition);
+
+ scmi_request_graceful_transition(sc, er->timeout);
+
+ return NOTIFY_OK;
+}
+
+static int scmi_syspower_probe(struct scmi_device *sdev)
+{
+ int ret;
+ struct scmi_syspower_conf *sc;
+ struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM);
+ if (ret)
+ return ret;
+
+ sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
+
+ sc->state = SCMI_SYSPOWER_IDLE;
+ mutex_init(&sc->state_mtx);
+ sc->required_transition = SCMI_SYSTEM_MAX;
+ sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
+ sc->dev = &sdev->dev;
+
+ return handle->notify_ops->devm_event_notifier_register(sdev,
+ SCMI_PROTOCOL_SYSTEM,
+ SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+ NULL, &sc->userspace_nb);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_SYSTEM, "syspower" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_system_power_driver = {
+ .name = "scmi-system-power",
+ .probe = scmi_syspower_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_system_power_driver);
+
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver");
+MODULE_LICENSE("GPL");
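To make the timeout handling described in the driver's header comment concrete: on a graceful request carrying, say, a 40000 ms platform timeout, the forceful fallback is armed at 75% of that budget, i.e. after 30000 ms, with mult_frac() avoiding intermediate overflow. A condensed, illustrative sketch of that arming step, reusing names from the driver above:

static void example_arm_forceful_fallback(struct scmi_syspower_conf *sc,
					  unsigned int timeout_ms)
{
	/* e.g. timeout_ms = 40000 -> adj_timeout_ms = 30000 */
	unsigned int adj_timeout_ms = mult_frac(timeout_ms, 3, 4);

	INIT_DELAYED_WORK(&sc->forceful_work, scmi_forceful_work_func);
	schedule_delayed_work(&sc->forceful_work,
			      msecs_to_jiffies(adj_timeout_ms));
}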
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 220e399118ad..9383d7584539 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -27,10 +27,12 @@ struct scmi_system_power_state_notifier_payld {
__le32 agent_id;
__le32 flags;
__le32 system_state;
+ __le32 timeout;
};
struct scmi_system_info {
u32 version;
+ bool graceful_timeout_supported;
};
static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
@@ -72,17 +74,27 @@ scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
+ size_t expected_sz;
const struct scmi_system_power_state_notifier_payld *p = payld;
struct scmi_system_power_state_notifier_report *r = report;
+ struct scmi_system_info *pinfo = ph->get_priv(ph);
+ expected_sz = pinfo->graceful_timeout_supported ?
+ sizeof(*p) : sizeof(*p) - sizeof(__le32);
if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
- sizeof(*p) != payld_sz)
+ payld_sz != expected_sz)
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->flags = le32_to_cpu(p->flags);
r->system_state = le32_to_cpu(p->system_state);
+ if (pinfo->graceful_timeout_supported &&
+ r->system_state == SCMI_SYSTEM_SHUTDOWN &&
+ SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags))
+ r->timeout = le32_to_cpu(p->timeout);
+ else
+ r->timeout = 0x00;
*src_id = 0;
return r;
@@ -129,6 +141,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
return -ENOMEM;
pinfo->version = version;
+ if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
+ pinfo->graceful_timeout_supported = true;
+
return ph->set_priv(ph, pinfo);
}
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index ddf0b9ff9e15..435d0e2658a4 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
info->firmware_version = le32_to_cpu(caps.platform_version);
}
/* Ignore error if not implemented */
- if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ if (info->is_legacy && ret == -EOPNOTSUPP)
return 0;
return ret;
@@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev)
struct resource res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ struct scpi_drvinfo *scpi_drvinfo;
- scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
- if (!scpi_info)
+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+ if (!scpi_drvinfo)
return -ENOMEM;
if (of_match_device(legacy_scpi_of_match, &pdev->dev))
- scpi_info->is_legacy = true;
+ scpi_drvinfo->is_legacy = true;
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
@@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
- GFP_KERNEL);
- if (!scpi_info->channels)
+ scpi_drvinfo->channels =
+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+ if (!scpi_drvinfo->channels)
return -ENOMEM;
- ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
if (ret)
return ret;
- for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
resource_size_t size;
- int idx = scpi_info->num_chans;
- struct scpi_chan *pchan = scpi_info->channels + idx;
+ int idx = scpi_drvinfo->num_chans;
+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
@@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev)
return ret;
}
- scpi_info->commands = scpi_std_commands;
+ scpi_drvinfo->commands = scpi_std_commands;
- platform_set_drvdata(pdev, scpi_info);
+ platform_set_drvdata(pdev, scpi_drvinfo);
- if (scpi_info->is_legacy) {
+ if (scpi_drvinfo->is_legacy) {
/* Replace with legacy variants */
scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
- scpi_info->commands = scpi_legacy_commands;
+ scpi_drvinfo->commands = scpi_legacy_commands;
/* Fill priority bitmap */
for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
set_bit(legacy_hpriority_cmds[idx],
- scpi_info->cmd_priority);
+ scpi_drvinfo->cmd_priority);
}
- ret = scpi_init_versions(scpi_info);
+ scpi_info = scpi_drvinfo;
+
+ ret = scpi_init_versions(scpi_drvinfo);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
+ scpi_info = NULL;
return ret;
}
- if (scpi_info->is_legacy && !scpi_info->protocol_version &&
- !scpi_info->firmware_version)
+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+ !scpi_drvinfo->firmware_version)
dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
else
dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
FIELD_GET(PROTO_REV_MAJOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(FW_REV_MAJOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK,
- scpi_info->firmware_version));
- scpi_info->scpi_ops = &scpi_ops;
+ scpi_drvinfo->firmware_version));
+
+ scpi_drvinfo->scpi_ops = &scpi_ops;
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ scpi_info = NULL;
+
+ return ret;
}
static const struct of_device_id scpi_of_match[] = {
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 1829ba220576..9f918b9e6f8f 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -120,6 +120,9 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc,
/**
* scm_legacy_call() - Sends a command to the SCM and waits for the command to
* finish processing.
+ * @dev: device
+ * @desc: descriptor structure containing arguments and return values
+ * @res: results from SMC call
*
* A note on cache maintenance:
* Note that any buffers that are expected to be accessed by the secure world
@@ -211,6 +214,7 @@ out:
/**
* scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
* and 3 return values
+ * @unused: device, legacy argument, not used, can be NULL
* @desc: SCM call descriptor containing arguments
* @res: SCM call return values
*
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 3163660fa8e2..cdbfe54c8146 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -7,6 +7,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
@@ -31,8 +32,13 @@ struct qcom_scm {
struct clk *core_clk;
struct clk *iface_clk;
struct clk *bus_clk;
+ struct icc_path *path;
struct reset_controller_dev reset;
+ /* control access to the interconnect path */
+ struct mutex scm_bw_lock;
+ int scm_vote_count;
+
u64 dload_mode_addr;
};
@@ -99,6 +105,42 @@ static void qcom_scm_clk_disable(void)
clk_disable_unprepare(__scm->bus_clk);
}
+static int qcom_scm_bw_enable(void)
+{
+ int ret = 0;
+
+ if (!__scm->path)
+ return 0;
+
+ if (IS_ERR(__scm->path))
+ return -EINVAL;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (!__scm->scm_vote_count) {
+ ret = icc_set_bw(__scm->path, 0, UINT_MAX);
+ if (ret < 0) {
+ dev_err(__scm->dev, "failed to set bandwidth request\n");
+ goto err_bw;
+ }
+ }
+ __scm->scm_vote_count++;
+err_bw:
+ mutex_unlock(&__scm->scm_bw_lock);
+
+ return ret;
+}
+
+static void qcom_scm_bw_disable(void)
+{
+ if (IS_ERR_OR_NULL(__scm->path))
+ return;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (__scm->scm_vote_count-- == 1)
+ icc_set_bw(__scm->path, 0, 0);
+ mutex_unlock(&__scm->scm_bw_lock);
+}
+
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
@@ -444,10 +486,15 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
if (ret)
goto out;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
out:
@@ -507,7 +554,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -537,7 +589,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -566,8 +623,13 @@ int qcom_scm_pas_shutdown(u32 peripheral)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -1277,8 +1339,15 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ mutex_init(&scm->scm_bw_lock);
+
clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+ scm->path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(scm->path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
+ "failed to acquire interconnect path\n");
+
scm->core_clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(scm->core_clk)) {
if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
@@ -1337,7 +1406,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
/*
* If requested enable "download mode", from this point on warmboot
- * will cause the the boot stages to enter download mode, unless
+ * will cause the boot stages to enter download mode, unless
* disabled below by a clean shutdown/reboot.
*/
if (download_mode)
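The interconnect changes above follow a single pattern: take a refcounted bandwidth vote around each PAS SCM call so that only the first enable and the last disable actually touch the interconnect path. A condensed sketch of that calling pattern with full unwinding is shown below; the helpers are file-local to qcom_scm.c, and the error-path details may differ from the individual call sites in the patch.

static int example_pas_call(struct device *dev,
			    const struct qcom_scm_desc *desc,
			    struct qcom_scm_res *res)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();	/* refcounted interconnect vote */
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(dev, desc, res);

	qcom_scm_bw_disable();		/* drop the vote */
disable_clk:
	qcom_scm_clk_disable();
	return ret;
}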
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index fd89899aeeed..0c440afd5224 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -474,7 +474,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
dentry = debugfs_create_file(name, mode, parent, bpmp,
&bpmp_debug_fops);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
err = -ENOMEM;
goto out;
}
@@ -725,7 +725,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
if (t & DEBUGFS_S_ISDIR) {
dentry = debugfs_create_dir(name, parent);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
if (err < 0)
@@ -738,7 +738,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
dentry = debugfs_create_file(name, mode,
parent, bpmp,
&debugfs_fops);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
}
}
@@ -788,11 +788,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
return 0;
root = debugfs_create_dir("bpmp", NULL);
- if (!root)
+ if (IS_ERR(root))
return -ENOMEM;
bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
- if (!bpmp->debugfs_mirror) {
+ if (IS_ERR(bpmp->debugfs_mirror)) {
err = -ENOMEM;
goto out;
}
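The bpmp-debugfs changes above switch the error checks from NULL tests to IS_ERR(): in current kernels the debugfs creation helpers report failure via ERR_PTR() values rather than NULL, so the old checks could never fire. A minimal sketch of the corrected pattern, for illustration only:

static int example_create_debugfs_dir(struct dentry **out)
{
	struct dentry *d = debugfs_create_dir("example", NULL);

	if (IS_ERR(d))		/* failure is ERR_PTR(), never NULL */
		return PTR_ERR(d);

	*out = d;
	return 0;
}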
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 5654c5e9862b..037db21de510 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -201,7 +201,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
int err;
if (data && size > 0)
- memcpy(data, channel->ib->data, size);
+ memcpy_fromio(data, channel->ib->data, size);
err = tegra_bpmp_ack_response(channel);
if (err < 0)
@@ -245,7 +245,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
channel->ob->flags = flags;
if (data && size > 0)
- memcpy(channel->ob->data, data, size);
+ memcpy_toio(channel->ob->data, data, size);
return tegra_bpmp_post_request(channel);
}
@@ -420,7 +420,7 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
channel->ob->code = code;
if (data && size > 0)
- memcpy(channel->ob->data, data, size);
+ memcpy_toio(channel->ob->data, data, size);
err = tegra_bpmp_post_response(channel);
if (WARN_ON(err < 0))
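The bpmp.c hunks above replace plain memcpy() on the shared-memory channel buffers with memcpy_toio()/memcpy_fromio(), since those buffers sit behind void __iomem pointers and must go through MMIO-safe accessors. A minimal sketch of the two directions, assuming an ioremap()ed shared-memory region:

static void example_shmem_copy(void __iomem *shmem, void *buf, size_t size)
{
	memcpy_toio(shmem, buf, size);		/* CPU buffer -> device memory */
	memcpy_fromio(buf, shmem, size);	/* device memory -> CPU buffer */
}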
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a1bae59208e3..708a67c7faaa 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -486,7 +486,7 @@ config I2C_BCM_KONA
config I2C_BRCMSTB
tristate "BRCM Settop/DSL I2C controller"
- depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCM_63XX || \
+ depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCMBCA || \
ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default y
help
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 4c5154e0bf00..d7cb7ead2ac7 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -21,11 +21,13 @@
/* SMI COMMON */
#define SMI_L1LEN 0x100
+#define SMI_L1_ARB 0x200
#define SMI_BUS_SEL 0x220
#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
/* All are MMU0 defaultly. Only specialize mmu1 here. */
#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+#define SMI_READ_FIFO_TH 0x230
#define SMI_M4U_TH 0x234
#define SMI_FIFO_TH1 0x238
#define SMI_FIFO_TH2 0x23c
@@ -360,6 +362,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701},
{.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
{.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
+ {.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
{.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
@@ -544,6 +547,13 @@ static struct platform_driver mtk_smi_larb_driver = {
}
};
+static const struct mtk_smi_reg_pair mtk_smi_common_mt6795_init[SMI_COMMON_INIT_REGS_NR] = {
+ {SMI_L1_ARB, 0x1b},
+ {SMI_M4U_TH, 0xce810c85},
+ {SMI_FIFO_TH1, 0x43214c8},
+ {SMI_READ_FIFO_TH, 0x191f},
+};
+
static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = {
{SMI_L1LEN, 0xb},
{SMI_M4U_TH, 0xe100e10},
@@ -568,6 +578,12 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(0),
+ .init = mtk_smi_common_mt6795_init,
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
.type = MTK_SMI_GEN2,
.has_gals = true,
@@ -612,6 +628,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1},
{.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
+ {.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795},
{.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
index e23ebd421f17..a9e8fd99730f 100644
--- a/drivers/memory/tegra/tegra234.c
+++ b/drivers/memory/tegra/tegra234.c
@@ -11,6 +11,76 @@
static const struct tegra_mc_client tegra234_mc_clients[] = {
{
+ .id = TEGRA234_MEMORY_CLIENT_MGBEARD,
+ .name = "mgbeard",
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2c0,
+ .security = 0x2c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBRD,
+ .name = "mgbebrd",
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2c8,
+ .security = 0x2cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECRD,
+ .name = "mgbecrd",
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x2d0,
+ .security = 0x2d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDRD,
+ .name = "mgbedrd",
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x2d8,
+ .security = 0x2dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEAWR,
+ .name = "mgbeawr",
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2e0,
+ .security = 0x2e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBWR,
+ .name = "mgbebwr",
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2f8,
+ .security = 0x2fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECWR,
+ .name = "mgbecwr",
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x308,
+ .security = 0x30c,
+ },
+ },
+ }, {
.id = TEGRA234_MEMORY_CLIENT_SDMMCRAB,
.name = "sdmmcrab",
.sid = TEGRA234_SID_SDMMC4,
@@ -21,6 +91,16 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
},
}, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDWR,
+ .name = "mgbedwr",
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x328,
+ .security = 0x32c,
+ },
+ },
+ }, {
.id = TEGRA234_MEMORY_CLIENT_SDMMCWAB,
.name = "sdmmcwab",
.sid = TEGRA234_SID_SDMMC4,
diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c
index 42fe67f1538e..49cd1f03884a 100644
--- a/drivers/mfd/bcm2835-pm.c
+++ b/drivers/mfd/bcm2835-pm.c
@@ -25,9 +25,52 @@ static const struct mfd_cell bcm2835_power_devs[] = {
{ .name = "bcm2835-power" },
};
+static int bcm2835_pm_get_pdata(struct platform_device *pdev,
+ struct bcm2835_pm *pm)
+{
+ if (of_find_property(pm->dev->of_node, "reg-names", NULL)) {
+ struct resource *res;
+
+ pm->base = devm_platform_ioremap_resource_byname(pdev, "pm");
+ if (IS_ERR(pm->base))
+ return PTR_ERR(pm->base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "asb");
+ if (res) {
+ pm->asb = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pm->asb))
+ pm->asb = NULL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "rpivid_asb");
+ if (res) {
+ pm->rpivid_asb = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pm->rpivid_asb))
+ pm->rpivid_asb = NULL;
+ }
+
+ return 0;
+ }
+
+	/* If no 'reg-names' property is found, assume we are using an old DTB. */
+ pm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pm->base))
+ return PTR_ERR(pm->base);
+
+ pm->asb = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pm->asb))
+ pm->asb = NULL;
+
+ pm->rpivid_asb = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(pm->rpivid_asb))
+ pm->rpivid_asb = NULL;
+
+ return 0;
+}
+
static int bcm2835_pm_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct bcm2835_pm *pm;
int ret;
@@ -39,10 +82,9 @@ static int bcm2835_pm_probe(struct platform_device *pdev)
pm->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pm->base))
- return PTR_ERR(pm->base);
+ ret = bcm2835_pm_get_pdata(pdev, pm);
+ if (ret)
+ return ret;
ret = devm_mfd_add_devices(dev, -1,
bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs),
@@ -50,30 +92,22 @@ static int bcm2835_pm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* We'll use the presence of the AXI ASB regs in the
+ /*
+ * We'll use the presence of the AXI ASB regs in the
* bcm2835-pm binding as the key for whether we can reference
* the full PM register range and support power domains.
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- pm->asb = devm_ioremap_resource(dev, res);
- if (IS_ERR(pm->asb))
- return PTR_ERR(pm->asb);
-
- ret = devm_mfd_add_devices(dev, -1,
- bcm2835_power_devs,
- ARRAY_SIZE(bcm2835_power_devs),
- NULL, 0, NULL);
- if (ret)
- return ret;
- }
-
+ if (pm->asb)
+ return devm_mfd_add_devices(dev, -1, bcm2835_power_devs,
+ ARRAY_SIZE(bcm2835_power_devs),
+ NULL, 0, NULL);
return 0;
}
static const struct of_device_id bcm2835_pm_of_match[] = {
{ .compatible = "brcm,bcm2835-pm-wdt", },
{ .compatible = "brcm,bcm2835-pm", },
+ { .compatible = "brcm,bcm2711-pm", },
{},
};
MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match);
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 849c4204f550..93a6a8ee4716 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -83,7 +83,7 @@ config PHY_NS2_USB_DRD
config PHY_BRCM_SATA
tristate "Broadcom SATA PHY driver"
depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \
- ARCH_BCM_63XX || COMPILE_TEST
+ ARCH_BCMBCA || COMPILE_TEST
depends on OF
select GENERIC_PHY
default ARCH_BCM_IPROC
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 86ccf5970bc1..e461c071189b 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -9,6 +9,7 @@ source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/canaan/Kconfig"
source "drivers/soc/fsl/Kconfig"
+source "drivers/soc/fujitsu/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/litex/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 919716e0e700..69ba6508cf2c 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SOC_CANAAN) += canaan/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
+obj-y += fujitsu/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-y += imx/
obj-y += ixp4xx/
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
index 78f0f1aeca57..92125dd65f33 100644
--- a/drivers/soc/amlogic/meson-mx-socinfo.c
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -126,6 +126,7 @@ static int __init meson_mx_socinfo_init(void)
np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
if (np) {
analog_top_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(analog_top_regmap))
return PTR_ERR(analog_top_regmap);
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
index a10a417a87db..e93518763526 100644
--- a/drivers/soc/amlogic/meson-secure-pwrc.c
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -152,8 +152,10 @@ static int meson_secure_pwrc_probe(struct platform_device *pdev)
}
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
- if (!pwrc)
+ if (!pwrc) {
+ of_node_put(sm_np);
return -ENOMEM;
+ }
pwrc->fw = meson_sm_get(sm_np);
of_node_put(sm_np);
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 1e0041ec8132..5bcd047768b6 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -126,8 +126,7 @@
#define ASB_AXI_BRDG_ID 0x20
-#define ASB_READ(reg) readl(power->asb + (reg))
-#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg))
+#define BCM2835_BRDG_ID 0x62726467
struct bcm2835_power_domain {
struct generic_pm_domain base;
@@ -142,24 +141,41 @@ struct bcm2835_power {
void __iomem *base;
/* AXI Async bridge registers. */
void __iomem *asb;
+ /* RPiVid bridge registers. */
+ void __iomem *rpivid_asb;
struct genpd_onecell_data pd_xlate;
struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT];
struct reset_controller_dev reset;
};
-static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
+static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable)
{
+ void __iomem *base = power->asb;
u64 start;
+ u32 val;
- if (!reg)
+ switch (reg) {
+ case 0:
return 0;
+ case ASB_V3D_S_CTRL:
+ case ASB_V3D_M_CTRL:
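+		/* On BCM2711 the V3D bridge registers live in the RPiVid ASB block. */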
+ if (power->rpivid_asb)
+ base = power->rpivid_asb;
+ break;
+ }
start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
- ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
- while (ASB_READ(reg) & ASB_ACK) {
+ if (enable) {
+ val = readl(base + reg) & ~ASB_REQ_STOP;
+ } else {
+ val = readl(base + reg) | ASB_REQ_STOP;
+ }
+ writel(PM_PASSWORD | val, base + reg);
+
+ while (readl(base + reg) & ASB_ACK) {
cpu_relax();
if (ktime_get_ns() - start >= 1000)
return -ETIMEDOUT;
@@ -168,30 +184,24 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
return 0;
}
-static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
{
- u64 start;
-
- if (!reg)
- return 0;
-
- start = ktime_get_ns();
-
- /* Enable the module's async AXI bridges. */
- ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
- while (!(ASB_READ(reg) & ASB_ACK)) {
- cpu_relax();
- if (ktime_get_ns() - start >= 1000)
- return -ETIMEDOUT;
- }
+ return bcm2835_asb_control(power, reg, true);
+}
- return 0;
+static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+{
+ return bcm2835_asb_control(power, reg, false);
}
static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg)
{
struct bcm2835_power *power = pd->power;
+ /* We don't run this on BCM2711 */
+ if (power->rpivid_asb)
+ return 0;
+
/* Enable functional isolation */
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC);
@@ -213,6 +223,10 @@ static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg)
int inrush;
bool powok;
+ /* We don't run this on BCM2711 */
+ if (power->rpivid_asb)
+ return 0;
+
/* If it was already powered on by the fw, leave it that way. */
if (PM_READ(pm_reg) & PM_POWUP)
return 0;
@@ -626,13 +640,23 @@ static int bcm2835_power_probe(struct platform_device *pdev)
power->dev = dev;
power->base = pm->base;
power->asb = pm->asb;
+ power->rpivid_asb = pm->rpivid_asb;
- id = ASB_READ(ASB_AXI_BRDG_ID);
- if (id != 0x62726467 /* "BRDG" */) {
+ id = readl(power->asb + ASB_AXI_BRDG_ID);
+ if (id != BCM2835_BRDG_ID /* "BRDG" */) {
dev_err(dev, "ASB register ID returned 0x%08x\n", id);
return -ENODEV;
}
+ if (power->rpivid_asb) {
+ id = readl(power->rpivid_asb + ASB_AXI_BRDG_ID);
+ if (id != BCM2835_BRDG_ID /* "BRDG" */) {
+ dev_err(dev, "RPiVid ASB register ID returned 0x%08x\n",
+ id);
+ return -ENODEV;
+ }
+ }
+
power->pd_xlate.domains = devm_kcalloc(dev,
ARRAY_SIZE(power_domain_names),
sizeof(*power->pd_xlate.domains),
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 2c975d79fe8e..1467bbd59690 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -340,12 +340,12 @@ static int __init brcmstb_biuctrl_init(void)
ret = setup_hifcpubiuctrl_regs(np);
if (ret)
- return ret;
+ goto out_put;
ret = mcp_write_pairing_set();
if (ret) {
pr_err("MCP: Unable to disable write pairing!\n");
- return ret;
+ goto out_put;
}
a72_b53_rac_enable_all(np);
@@ -353,6 +353,9 @@ static int __init brcmstb_biuctrl_init(void)
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
- return 0;
+ ret = 0;
+out_put:
+ of_node_put(np);
+ return ret;
}
early_initcall(brcmstb_biuctrl_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index 70ad0f3dce28..d6b30d521307 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -721,7 +721,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
/*
- * Slightly grosss to use the phy ver to get a memc,
+ * Slightly gross to use the phy ver to get a memc,
* offset but that is the only versioned things so far
* we can test for.
*/
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 5ed2fc1c53a0..6bf3e6a980ff 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -14,21 +14,16 @@
#include <linux/platform_device.h>
#include <linux/fsl/guts.h>
-struct guts {
- struct ccsr_guts __iomem *regs;
- bool little_endian;
-};
-
struct fsl_soc_die_attr {
char *die;
u32 svr;
u32 mask;
};
-static struct guts *guts;
-static struct soc_device_attribute soc_dev_attr;
-static struct soc_device *soc_dev;
-
+struct fsl_soc_data {
+ const char *sfp_compat;
+ u32 uid_offset;
+};
/* SoC die attribute definition for QorIQ platform */
static const struct fsl_soc_die_attr fsl_soc_die[] = {
@@ -120,88 +115,36 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match(
return NULL;
}
-static u32 fsl_guts_get_svr(void)
-{
- u32 svr = 0;
-
- if (!guts || !guts->regs)
- return svr;
-
- if (guts->little_endian)
- svr = ioread32(&guts->regs->svr);
- else
- svr = ioread32be(&guts->regs->svr);
-
- return svr;
-}
-
-static int fsl_guts_probe(struct platform_device *pdev)
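+/* Read the 64-bit SoC unique ID from the SFP node matching @compat, at @offset. */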
+static u64 fsl_guts_get_soc_uid(const char *compat, unsigned int offset)
{
- struct device_node *root, *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- const struct fsl_soc_die_attr *soc_die;
- const char *machine;
- u32 svr;
-
- /* Initialize guts */
- guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
- if (!guts)
- return -ENOMEM;
-
- guts->little_endian = of_property_read_bool(np, "little-endian");
+ struct device_node *np;
+ void __iomem *sfp_base;
+ u64 uid;
- guts->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(guts->regs))
- return PTR_ERR(guts->regs);
+ np = of_find_compatible_node(NULL, NULL, compat);
+ if (!np)
+ return 0;
- /* Register soc device */
- root = of_find_node_by_path("/");
- if (of_property_read_string(root, "model", &machine))
- of_property_read_string_index(root, "compatible", 0, &machine);
- if (machine) {
- soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
- if (!soc_dev_attr.machine) {
- of_node_put(root);
- return -ENOMEM;
- }
+ sfp_base = of_iomap(np, 0);
+ if (!sfp_base) {
+ of_node_put(np);
+ return 0;
}
- of_node_put(root);
- svr = fsl_guts_get_svr();
- soc_die = fsl_soc_die_match(svr, fsl_soc_die);
- if (soc_die) {
- soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
- "QorIQ %s", soc_die->die);
- } else {
- soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
- }
- if (!soc_dev_attr.family)
- return -ENOMEM;
- soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
- "svr:0x%08x", svr);
- if (!soc_dev_attr.soc_id)
- return -ENOMEM;
- soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
- (svr >> 4) & 0xf, svr & 0xf);
- if (!soc_dev_attr.revision)
- return -ENOMEM;
+ uid = ioread32(sfp_base + offset);
+ uid <<= 32;
+ uid |= ioread32(sfp_base + offset + 4);
- soc_dev = soc_device_register(&soc_dev_attr);
- if (IS_ERR(soc_dev))
- return PTR_ERR(soc_dev);
+ iounmap(sfp_base);
+ of_node_put(np);
- pr_info("Machine: %s\n", soc_dev_attr.machine);
- pr_info("SoC family: %s\n", soc_dev_attr.family);
- pr_info("SoC ID: %s, Revision: %s\n",
- soc_dev_attr.soc_id, soc_dev_attr.revision);
- return 0;
+ return uid;
}
-static int fsl_guts_remove(struct platform_device *dev)
-{
- soc_device_unregister(soc_dev);
- return 0;
-}
+static const struct fsl_soc_data ls1028a_data = {
+ .sfp_compat = "fsl,ls1028a-sfp",
+ .uid_offset = 0x21c,
+};
/*
* Table for matching compatible strings, for device tree
@@ -231,28 +174,106 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1012a-dcfg", },
{ .compatible = "fsl,ls1046a-dcfg", },
{ .compatible = "fsl,lx2160a-dcfg", },
- { .compatible = "fsl,ls1028a-dcfg", },
+ { .compatible = "fsl,ls1028a-dcfg", .data = &ls1028a_data},
{}
};
-MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
-
-static struct platform_driver fsl_guts_driver = {
- .driver = {
- .name = "fsl-guts",
- .of_match_table = fsl_guts_of_match,
- },
- .probe = fsl_guts_probe,
- .remove = fsl_guts_remove,
-};
static int __init fsl_guts_init(void)
{
- return platform_driver_register(&fsl_guts_driver);
-}
-core_initcall(fsl_guts_init);
+ struct soc_device_attribute *soc_dev_attr;
+ static struct soc_device *soc_dev;
+ const struct fsl_soc_die_attr *soc_die;
+ const struct fsl_soc_data *soc_data;
+ const struct of_device_id *match;
+ struct ccsr_guts __iomem *regs;
+ const char *machine = NULL;
+ struct device_node *np;
+ bool little_endian;
+ u64 soc_uid = 0;
+ u32 svr;
+ int ret;
-static void __exit fsl_guts_exit(void)
-{
- platform_driver_unregister(&fsl_guts_driver);
+ np = of_find_matching_node_and_match(NULL, fsl_guts_of_match, &match);
+ if (!np)
+ return 0;
+ soc_data = match->data;
+
+ regs = of_iomap(np, 0);
+ if (!regs) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ little_endian = of_property_read_bool(np, "little-endian");
+ if (little_endian)
+ svr = ioread32(&regs->svr);
+ else
+ svr = ioread32be(&regs->svr);
+ iounmap(regs);
+ of_node_put(np);
+
+ /* Register soc device */
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ if (of_property_read_string(of_root, "model", &machine))
+ of_property_read_string_index(of_root, "compatible", 0, &machine);
+ if (machine) {
+ soc_dev_attr->machine = kstrdup(machine, GFP_KERNEL);
+ if (!soc_dev_attr->machine)
+ goto err_nomem;
+ }
+
+ soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+ if (soc_die) {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ %s",
+ soc_die->die);
+ } else {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ");
+ }
+ if (!soc_dev_attr->family)
+ goto err_nomem;
+
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "svr:0x%08x", svr);
+ if (!soc_dev_attr->soc_id)
+ goto err_nomem;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (svr >> 4) & 0xf, svr & 0xf);
+ if (!soc_dev_attr->revision)
+ goto err_nomem;
+
+ if (soc_data)
+ soc_uid = fsl_guts_get_soc_uid(soc_data->sfp_compat,
+ soc_data->uid_offset);
+ if (soc_uid)
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX",
+ soc_uid);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err;
+ }
+
+ pr_info("Machine: %s\n", soc_dev_attr->machine);
+ pr_info("SoC family: %s\n", soc_dev_attr->family);
+ pr_info("SoC ID: %s, Revision: %s\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+
+err_nomem:
+ ret = -ENOMEM;
+err:
+ kfree(soc_dev_attr->machine);
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr->serial_number);
+ kfree(soc_dev_attr);
+
+ return ret;
}
-module_exit(fsl_guts_exit);
+core_initcall(fsl_guts_init);
diff --git a/drivers/soc/fujitsu/Kconfig b/drivers/soc/fujitsu/Kconfig
new file mode 100644
index 000000000000..987731e80612
--- /dev/null
+++ b/drivers/soc/fujitsu/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "fujitsu SoC drivers"
+
+config A64FX_DIAG
+ bool "A64FX diag driver"
+ depends on ARM64
+ depends on ACPI
+ help
+	  Say Y here if you want to enable the diag interrupt on the
+	  Fujitsu A64FX. This driver handles the BMC's diagnostic requests
+	  by enabling the A64FX-specific interrupt, allowing administrators
+	  to obtain kernel dumps via diagnostic requests using ipmitool, etc.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/soc/fujitsu/Makefile b/drivers/soc/fujitsu/Makefile
new file mode 100644
index 000000000000..945bc1c14ad0
--- /dev/null
+++ b/drivers/soc/fujitsu/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_A64FX_DIAG) += a64fx-diag.o
diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c
new file mode 100644
index 000000000000..d87f348427bf
--- /dev/null
+++ b/drivers/soc/fujitsu/a64fx-diag.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A64FX diag driver.
+ * Copyright (c) 2022 Fujitsu Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define A64FX_DIAG_IRQ 1
+#define BMC_DIAG_INTERRUPT_ENABLE 0x40
+#define BMC_DIAG_INTERRUPT_STATUS 0x44
+#define BMC_DIAG_INTERRUPT_MASK BIT(31)
+
+struct a64fx_diag_priv {
+ void __iomem *mmsc_reg_base;
+ int irq;
+ bool has_nmi;
+};
+
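+/* A BMC diagnostic request lands here; panic so a kernel dump can be collected. */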
+static irqreturn_t a64fx_diag_handler_nmi(int irq, void *dev_id)
+{
+ nmi_panic(NULL, "a64fx_diag: interrupt received\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a64fx_diag_handler_irq(int irq, void *dev_id)
+{
+ panic("a64fx_diag: interrupt received\n");
+
+ return IRQ_HANDLED;
+}
+
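+/* Clear a pending diag interrupt by writing the status bit back. */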
+static void a64fx_diag_interrupt_clear(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_status_reg_addr;
+ u32 mmsc;
+
+ diag_status_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_STATUS;
+ mmsc = readl(diag_status_reg_addr);
+ if (mmsc & BMC_DIAG_INTERRUPT_MASK)
+ writel(BMC_DIAG_INTERRUPT_MASK, diag_status_reg_addr);
+}
+
+static void a64fx_diag_interrupt_enable(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_enable_reg_addr;
+ u32 mmsc;
+
+ diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE;
+ mmsc = readl(diag_enable_reg_addr);
+ if (!(mmsc & BMC_DIAG_INTERRUPT_MASK)) {
+ mmsc |= BMC_DIAG_INTERRUPT_MASK;
+ writel(mmsc, diag_enable_reg_addr);
+ }
+}
+
+static void a64fx_diag_interrupt_disable(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_enable_reg_addr;
+ u32 mmsc;
+
+ diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE;
+ mmsc = readl(diag_enable_reg_addr);
+ if (mmsc & BMC_DIAG_INTERRUPT_MASK) {
+ mmsc &= ~BMC_DIAG_INTERRUPT_MASK;
+ writel(mmsc, diag_enable_reg_addr);
+ }
+}
+
+static int a64fx_diag_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct a64fx_diag_priv *priv;
+ unsigned long irq_flags;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->mmsc_reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmsc_reg_base))
+ return PTR_ERR(priv->mmsc_reg_base);
+
+ priv->irq = platform_get_irq(pdev, A64FX_DIAG_IRQ);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ platform_set_drvdata(pdev, priv);
+
+ irq_flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_AUTOEN |
+ IRQF_NO_THREAD;
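+	/* Prefer an NMI; fall back to a regular IRQ if NMI registration fails. */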
+ ret = request_nmi(priv->irq, &a64fx_diag_handler_nmi, irq_flags,
+ "a64fx_diag_nmi", NULL);
+ if (ret) {
+ ret = request_irq(priv->irq, &a64fx_diag_handler_irq,
+ irq_flags, "a64fx_diag_irq", NULL);
+ if (ret) {
+			dev_err(dev, "cannot register IRQ %d: %d\n", priv->irq, ret);
+ return ret;
+ }
+ enable_irq(priv->irq);
+ } else {
+ enable_nmi(priv->irq);
+ priv->has_nmi = true;
+ }
+
+ a64fx_diag_interrupt_clear(priv);
+ a64fx_diag_interrupt_enable(priv);
+
+ return 0;
+}
+
+static int a64fx_diag_remove(struct platform_device *pdev)
+{
+ struct a64fx_diag_priv *priv = platform_get_drvdata(pdev);
+
+ a64fx_diag_interrupt_disable(priv);
+ a64fx_diag_interrupt_clear(priv);
+
+ if (priv->has_nmi)
+ free_nmi(priv->irq, NULL);
+ else
+ free_irq(priv->irq, NULL);
+
+ return 0;
+}
+
+static const struct acpi_device_id a64fx_diag_acpi_match[] = {
+ { "FUJI2007", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, a64fx_diag_acpi_match);
+
+static struct platform_driver a64fx_diag_driver = {
+ .driver = {
+ .name = "a64fx_diag_driver",
+ .acpi_match_table = ACPI_PTR(a64fx_diag_acpi_match),
+ },
+ .probe = a64fx_diag_probe,
+ .remove = a64fx_diag_remove,
+};
+
+module_platform_driver(a64fx_diag_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hitomi Hasegawa <hasegawa-hitomi@fujitsu.com>");
+MODULE_DESCRIPTION("A64FX diag driver");
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 85aa86e1338a..6383a4edc360 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -328,7 +328,9 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
if (!IS_ERR(domain->regulator)) {
ret = regulator_enable(domain->regulator);
if (ret) {
- dev_err(domain->dev, "failed to enable regulator\n");
+ dev_err(domain->dev,
+ "failed to enable regulator: %pe\n",
+ ERR_PTR(ret));
goto out_put_pm;
}
}
@@ -467,7 +469,9 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
if (!IS_ERR(domain->regulator)) {
ret = regulator_disable(domain->regulator);
if (ret) {
- dev_err(domain->dev, "failed to disable regulator\n");
+ dev_err(domain->dev,
+ "failed to disable regulator: %pe\n",
+ ERR_PTR(ret));
return ret;
}
}
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 7ebc28709e94..dff7529268e4 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -216,7 +216,7 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus");
if (IS_ERR(bc->bus_power_dev))
return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
- "failed to attach power domain\n");
+ "failed to attach power domain \"bus\"\n");
for (i = 0; i < bc_data->num_domains; i++) {
const struct imx8m_blk_ctrl_domain_data *data = &bc_data->domains[i];
@@ -238,7 +238,8 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
dev_pm_domain_attach_by_name(dev, data->gpc_name);
if (IS_ERR(domain->power_dev)) {
dev_err_probe(dev, PTR_ERR(domain->power_dev),
- "failed to attach power domain\n");
+ "failed to attach power domain \"%s\"\n",
+ data->gpc_name);
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
@@ -251,7 +252,9 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
- dev_err_probe(dev, ret, "failed to init power domain\n");
+ dev_err_probe(dev, ret,
+ "failed to init power domain \"%s\"\n",
+ data->gpc_name);
dev_pm_domain_detach(domain->power_dev, true);
goto cleanup_pds;
}
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index fdd8bc08569e..3c3eedea35f7 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -73,4 +73,14 @@ config MTK_MMSYS
Say yes here to add support for the MediaTek Multimedia
Subsystem (MMSYS).
+config MTK_SVS
+	tristate "MediaTek Smart Voltage Scaling (SVS)"
+ depends on MTK_EFUSE && NVMEM
+ help
+	  The Smart Voltage Scaling (SVS) engine is a piece of hardware
+	  with several controllers (banks) that calculate suitable
+	  voltages for different power domains (CPU/GPU/CCI) according
+	  to the chip's process corner, temperature and other factors.
+	  The DVFS driver can then apply the SVS bank voltage to the
+	  PMIC/buck.
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 90270f8114ed..0e9e703c931a 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
+obj-$(CONFIG_MTK_SVS) += mtk-svs.o
diff --git a/drivers/soc/mediatek/mt6795-pm-domains.h b/drivers/soc/mediatek/mt6795-pm-domains.h
new file mode 100644
index 000000000000..ef07c9dfdd9b
--- /dev/null
+++ b/drivers/soc/mediatek/mt6795-pm-domains.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT6795_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT6795_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt6795-power.h>
+
+/*
+ * MT6795 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = {
+ [MT6795_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT6795_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT6795_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT6795_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1),
+ },
+ },
+ [MT6795_POWER_DOMAIN_MJC] = {
+ .name = "mjc",
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x298,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT6795_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT6795_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT6795_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT6795_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT),
+ },
+ },
+};
+
+static const struct scpsys_soc_data mt6795_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt6795,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt6795),
+};
+
+#endif /* __SOC_MEDIATEK_MT6795_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
index 71b8757e552d..99de67fe5de8 100644
--- a/drivers/soc/mediatek/mt8183-pm-domains.h
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -41,6 +41,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
+ .caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8183_POWER_DOMAIN_MFG] = {
.name = "mfg",
diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h
index bf2dd0cdc3a8..108af61854a3 100644
--- a/drivers/soc/mediatek/mt8186-pm-domains.h
+++ b/drivers/soc/mediatek/mt8186-pm-domains.h
@@ -51,7 +51,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
MT8186_TOP_AXI_PROT_EN_1_CLR,
MT8186_TOP_AXI_PROT_EN_1_STA),
},
- .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8186_POWER_DOMAIN_MFG2] = {
.name = "mfg2",
diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
index 558c4ee4784a..b97b2051920f 100644
--- a/drivers/soc/mediatek/mt8192-pm-domains.h
+++ b/drivers/soc/mediatek/mt8192-pm-domains.h
@@ -58,6 +58,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8192_POWER_DOMAIN_MFG1] = {
.name = "mfg1",
@@ -85,6 +86,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
MT8192_TOP_AXI_PROT_EN_2_CLR,
MT8192_TOP_AXI_PROT_EN_2_STA1),
},
+ .caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8192_POWER_DOMAIN_MFG2] = {
.name = "mfg2",
diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h
index 938f4d51f5ae..d7387ea1b9c9 100644
--- a/drivers/soc/mediatek/mt8195-pm-domains.h
+++ b/drivers/soc/mediatek/mt8195-pm-domains.h
@@ -67,7 +67,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
.ctl_offs = 0x334,
.pwr_sta_offs = 0x174,
.pwr_sta2nd_offs = 0x178,
- .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_ALWAYS_ON,
},
[MT8195_POWER_DOMAIN_CSI_RX_TOP] = {
.name = "csi_rx_top",
@@ -162,7 +162,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
},
- .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8195_POWER_DOMAIN_MFG2] = {
.name = "mfg2",
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
index 24129a6c25f8..7abaf048d91e 100644
--- a/drivers/soc/mediatek/mt8365-mmsys.h
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -10,6 +10,9 @@
#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN 0xf60
#define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0xf64
#define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0xf68
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL 0xfd0
+#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
+#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
#define MT8365_RDMA0_SOUT_COLOR0 0x1
#define MT8365_DITHER_MOUT_EN_DSI0 0x1
@@ -18,6 +21,10 @@
#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
#define MT8365_DISP_COLOR_SEL_IN_COLOR0 0x0
#define MT8365_OVL0_MOUT_PATH0_SEL BIT(0)
+#define MT8365_RDMA1_SOUT_DPI0 0x1
+#define MT8365_DPI0_SEL_IN_RDMA1 0x0
+#define MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK 0x1
static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
{
@@ -55,6 +62,21 @@ static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
},
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
+ MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
+ MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
+ },
};
#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index 7c65ad3d1f8a..fc13334db1b1 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -31,10 +31,7 @@ struct mtk_devapc_vio_dbgs {
u32 vio_dbg1;
};
-struct mtk_devapc_data {
- /* numbers of violation index */
- u32 vio_idx_num;
-
+struct mtk_devapc_regs_ofs {
/* reg offset */
u32 vio_mask_offset;
u32 vio_sta_offset;
@@ -46,6 +43,12 @@ struct mtk_devapc_data {
u32 vio_shift_con_offset;
};
+struct mtk_devapc_data {
+ /* numbers of violation index */
+ u32 vio_idx_num;
+ const struct mtk_devapc_regs_ofs *regs_ofs;
+};
+
struct mtk_devapc_context {
struct device *dev;
void __iomem *infra_base;
@@ -58,7 +61,7 @@ static void clear_vio_status(struct mtk_devapc_context *ctx)
void __iomem *reg;
int i;
- reg = ctx->infra_base + ctx->data->vio_sta_offset;
+ reg = ctx->infra_base + ctx->data->regs_ofs->vio_sta_offset;
for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
writel(GENMASK(31, 0), reg + 4 * i);
@@ -73,7 +76,7 @@ static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask)
u32 val;
int i;
- reg = ctx->infra_base + ctx->data->vio_mask_offset;
+ reg = ctx->infra_base + ctx->data->regs_ofs->vio_mask_offset;
if (mask)
val = GENMASK(31, 0);
@@ -116,11 +119,11 @@ static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx)
u32 val;
pd_vio_shift_sta_reg = ctx->infra_base +
- ctx->data->vio_shift_sta_offset;
+ ctx->data->regs_ofs->vio_shift_sta_offset;
pd_vio_shift_sel_reg = ctx->infra_base +
- ctx->data->vio_shift_sel_offset;
+ ctx->data->regs_ofs->vio_shift_sel_offset;
pd_vio_shift_con_reg = ctx->infra_base +
- ctx->data->vio_shift_con_offset;
+ ctx->data->regs_ofs->vio_shift_con_offset;
/* Find the minimum shift group which has violation */
val = readl(pd_vio_shift_sta_reg);
@@ -161,8 +164,8 @@ static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx)
void __iomem *vio_dbg0_reg;
void __iomem *vio_dbg1_reg;
- vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset;
- vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset;
+ vio_dbg0_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg0_offset;
+ vio_dbg1_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg1_offset;
vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg);
vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg);
@@ -200,7 +203,7 @@ static irqreturn_t devapc_violation_irq(int irq_number, void *data)
*/
static void start_devapc(struct mtk_devapc_context *ctx)
{
- writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset);
+ writel(BIT(31), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset);
mask_module_irq(ctx, false);
}
@@ -212,11 +215,10 @@ static void stop_devapc(struct mtk_devapc_context *ctx)
{
mask_module_irq(ctx, true);
- writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset);
+ writel(BIT(2), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset);
}
-static const struct mtk_devapc_data devapc_mt6779 = {
- .vio_idx_num = 511,
+static const struct mtk_devapc_regs_ofs devapc_regs_ofs_mt6779 = {
.vio_mask_offset = 0x0,
.vio_sta_offset = 0x400,
.vio_dbg0_offset = 0x900,
@@ -227,11 +229,24 @@ static const struct mtk_devapc_data devapc_mt6779 = {
.vio_shift_con_offset = 0xF20,
};
+static const struct mtk_devapc_data devapc_mt6779 = {
+ .vio_idx_num = 511,
+ .regs_ofs = &devapc_regs_ofs_mt6779,
+};
+
+static const struct mtk_devapc_data devapc_mt8186 = {
+ .vio_idx_num = 519,
+ .regs_ofs = &devapc_regs_ofs_mt6779,
+};
+
static const struct of_device_id mtk_devapc_dt_match[] = {
{
.compatible = "mediatek,mt6779-devapc",
.data = &devapc_mt6779,
}, {
+ .compatible = "mediatek,mt8186-devapc",
+ .data = &devapc_mt8186,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, mtk_devapc_dt_match);
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 981d56967e7a..5ea43de4e410 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -7,10 +7,12 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#define MT2701_MUTEX0_MOD0 0x2c
#define MT2701_MUTEX0_SOF0 0x30
@@ -80,6 +82,15 @@
#define MT8183_MUTEX_MOD_DISP_GAMMA0 16
#define MT8183_MUTEX_MOD_DISP_DITHER0 17
+#define MT8183_MUTEX_MOD_MDP_RDMA0 2
+#define MT8183_MUTEX_MOD_MDP_RSZ0 4
+#define MT8183_MUTEX_MOD_MDP_RSZ1 5
+#define MT8183_MUTEX_MOD_MDP_TDSHP0 6
+#define MT8183_MUTEX_MOD_MDP_WROT0 7
+#define MT8183_MUTEX_MOD_MDP_WDMA 8
+#define MT8183_MUTEX_MOD_MDP_AAL0 23
+#define MT8183_MUTEX_MOD_MDP_CCORR0 24
+
#define MT8173_MUTEX_MOD_DISP_OVL0 11
#define MT8173_MUTEX_MOD_DISP_OVL1 12
#define MT8173_MUTEX_MOD_DISP_RDMA0 13
@@ -110,6 +121,20 @@
#define MT8195_MUTEX_MOD_DISP_DP_INTF0 21
#define MT8195_MUTEX_MOD_DISP_PWM0 27
+#define MT8365_MUTEX_MOD_DISP_OVL0 7
+#define MT8365_MUTEX_MOD_DISP_OVL0_2L 8
+#define MT8365_MUTEX_MOD_DISP_RDMA0 9
+#define MT8365_MUTEX_MOD_DISP_RDMA1 10
+#define MT8365_MUTEX_MOD_DISP_WDMA0 11
+#define MT8365_MUTEX_MOD_DISP_COLOR0 12
+#define MT8365_MUTEX_MOD_DISP_CCORR 13
+#define MT8365_MUTEX_MOD_DISP_AAL 14
+#define MT8365_MUTEX_MOD_DISP_GAMMA 15
+#define MT8365_MUTEX_MOD_DISP_DITHER 16
+#define MT8365_MUTEX_MOD_DISP_DSI0 17
+#define MT8365_MUTEX_MOD_DISP_PWM0 20
+#define MT8365_MUTEX_MOD_DISP_DPI0 22
+
#define MT2712_MUTEX_MOD_DISP_PWM2 10
#define MT2712_MUTEX_MOD_DISP_OVL0 11
#define MT2712_MUTEX_MOD_DISP_OVL1 12
@@ -185,6 +210,7 @@ struct mtk_mutex_data {
const unsigned int *mutex_sof;
const unsigned int mutex_mod_reg;
const unsigned int mutex_sof_reg;
+ const unsigned int *mutex_table_mod;
const bool no_clk;
};
@@ -194,6 +220,8 @@ struct mtk_mutex_ctx {
void __iomem *regs;
struct mtk_mutex mutex[10];
const struct mtk_mutex_data *data;
+ phys_addr_t addr;
+ struct cmdq_client_reg cmdq_reg;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -272,6 +300,17 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
};
+static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8183_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8183_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_WDMA] = MT8183_MUTEX_MOD_MDP_WDMA,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8183_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0,
+};
+
static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0,
@@ -315,6 +354,22 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0,
};
+static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR,
+ [DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER0] = MT8365_MUTEX_MOD_DISP_DITHER,
+ [DDP_COMPONENT_DPI0] = MT8365_MUTEX_MOD_DISP_DPI0,
+ [DDP_COMPONENT_DSI0] = MT8365_MUTEX_MOD_DISP_DSI0,
+ [DDP_COMPONENT_GAMMA] = MT8365_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OVL0] = MT8365_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8365_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_PWM0] = MT8365_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_RDMA0] = MT8365_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8365_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0,
+};
+
static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
@@ -399,6 +454,7 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.mutex_sof = mt8183_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8183_mutex_table_mod,
.no_clk = true,
};
@@ -423,6 +479,14 @@ static const struct mtk_mutex_data mt8195_mutex_driver_data = {
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt8365_mutex_driver_data = {
+ .mutex_mod = mt8365_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .no_clk = true,
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev)
{
struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
@@ -572,6 +636,30 @@ void mtk_mutex_enable(struct mtk_mutex *mutex)
}
EXPORT_SYMBOL_GPL(mtk_mutex_enable);
+int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, void *pkt)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_pkt *cmdq_pkt = (struct cmdq_pkt *)pkt;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (!mtx->cmdq_reg.size) {
+		dev_err(mtx->dev, "mediatek,gce-client-reg hasn't been set\n");
+ return -EINVAL;
+ }
+
+ cmdq_pkt_write(cmdq_pkt, mtx->cmdq_reg.subsys,
+ mtx->addr + DISP_REG_MUTEX_EN(mutex->id), 1);
+ return 0;
+#else
+	dev_err(mtx->dev, "enabling MUTEX via CMDQ is not supported\n");
+ return -ENODEV;
+#endif
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_enable_by_cmdq);
+
void mtk_mutex_disable(struct mtk_mutex *mutex)
{
struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
@@ -606,12 +694,67 @@ void mtk_mutex_release(struct mtk_mutex *mutex)
}
EXPORT_SYMBOL_GPL(mtk_mutex_release);
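+/* Set or clear the MOD bit of an MDP component in this mutex's MOD register. */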
+int mtk_mutex_write_mod(struct mtk_mutex *mutex,
+ enum mtk_mutex_mod_index idx, bool clear)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ unsigned int reg;
+ unsigned int offset;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (idx < MUTEX_MOD_IDX_MDP_RDMA0 ||
+ idx >= MUTEX_MOD_IDX_MAX) {
+		dev_err(mtx->dev, "unsupported MOD table index: %d\n", idx);
+ return -EINVAL;
+ }
+
+ offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
+ mutex->id);
+ reg = readl_relaxed(mtx->regs + offset);
+
+ if (clear)
+ reg &= ~BIT(mtx->data->mutex_table_mod[idx]);
+ else
+ reg |= BIT(mtx->data->mutex_table_mod[idx]);
+
+ writel_relaxed(reg, mtx->regs + offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_write_mod);
+
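+/* Program the start-of-frame (SOF) source of this mutex. */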
+int mtk_mutex_write_sof(struct mtk_mutex *mutex,
+ enum mtk_mutex_sof_index idx)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (idx < MUTEX_SOF_IDX_SINGLE_MODE ||
+ idx >= MUTEX_SOF_IDX_MAX) {
+		dev_err(mtx->dev, "unsupported SOF index: %d\n", idx);
+ return -EINVAL;
+ }
+
+ writel_relaxed(idx, mtx->regs +
+ DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_write_sof);
+
static int mtk_mutex_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_mutex_ctx *mtx;
struct resource *regs;
int i;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ int ret;
+#endif
mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL);
if (!mtx)
@@ -631,12 +774,18 @@ static int mtk_mutex_probe(struct platform_device *pdev)
}
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mtx->regs = devm_ioremap_resource(dev, regs);
+ mtx->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mtx->regs)) {
dev_err(dev, "Failed to map mutex registers\n");
return PTR_ERR(mtx->regs);
}
+ mtx->addr = regs->start;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &mtx->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "No mediatek,gce-client-reg!\n");
+#endif
platform_set_drvdata(pdev, mtx);
@@ -665,6 +814,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8192_mutex_driver_data},
{ .compatible = "mediatek,mt8195-disp-mutex",
.data = &mt8195_mutex_driver_data},
+ { .compatible = "mediatek,mt8365-disp-mutex",
+ .data = &mt8365_mutex_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index 5ced254b082b..9734f1091c69 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -16,6 +16,7 @@
#include <linux/regulator/consumer.h>
#include <linux/soc/mediatek/infracfg.h>
+#include "mt6795-pm-domains.h"
#include "mt8167-pm-domains.h"
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
@@ -428,6 +429,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
goto err_put_subsys_clocks;
}
+
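+		/* GENPD_FLAG_ALWAYS_ON tells genpd never to power this domain off. */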
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON))
+ pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
}
if (scpsys->domains[id]) {
@@ -556,6 +560,10 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys)
static const struct of_device_id scpsys_of_match[] = {
{
+ .compatible = "mediatek,mt6795-power-controller",
+ .data = &mt6795_scpsys_data,
+ },
+ {
.compatible = "mediatek,mt8167-power-controller",
.data = &mt8167_scpsys_data,
},
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
index daa24e890dd4..7d3c0c36316c 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.h
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -8,6 +8,8 @@
#define MTK_SCPD_SRAM_ISO BIT(2)
#define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3)
#define MTK_SCPD_DOMAIN_SUPPLY BIT(4)
+/* cannot be combined with MTK_SCPD_KEEP_DEFAULT_OFF */
+#define MTK_SCPD_ALWAYS_ON BIT(5)
#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
#define SPM_VDE_PWR_CON 0x0210
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index bf39a64f3ecc..d8cb0f833645 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -13,6 +13,9 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#define PWRAP_POLL_DELAY_US 10
+#define PWRAP_POLL_TIMEOUT_US 10000
+
#define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4
#define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10
#define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14
@@ -1140,12 +1143,9 @@ enum pwrap_type {
};
struct pmic_wrapper;
-struct pwrap_slv_type {
- const u32 *dew_regs;
- enum pmic_type type;
+
+struct pwrap_slv_regops {
const struct regmap_config *regmap;
- /* Flags indicating the capability for the target slave */
- u32 caps;
/*
* pwrap operations are highly associated with the PMIC types,
* so the pointers added increases flexibility allowing determination
@@ -1155,6 +1155,14 @@ struct pwrap_slv_type {
int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata);
};
+struct pwrap_slv_type {
+ const u32 *dew_regs;
+ enum pmic_type type;
+ const struct pwrap_slv_regops *regops;
+ /* Flags indicating the capability for the target slave */
+ u32 caps;
+};
+
struct pmic_wrapper {
struct device *dev;
void __iomem *base;
@@ -1241,27 +1249,14 @@ static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp)
(val & PWRAP_STATE_SYNC_IDLE0);
}
-static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
- bool (*fp)(struct pmic_wrapper *))
-{
- unsigned long timeout;
-
- timeout = jiffies + usecs_to_jiffies(10000);
-
- do {
- if (time_after(jiffies, timeout))
- return fp(wrp) ? 0 : -ETIMEDOUT;
- if (fp(wrp))
- return 0;
- } while (1);
-}
-
static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
+ bool tmp;
int ret;
u32 val;
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1273,7 +1268,8 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
val = (adr >> 1) << 16;
pwrap_writel(wrp, val, PWRAP_WACS2_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret)
return ret;
@@ -1290,11 +1286,14 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
+ bool tmp;
int ret, msb;
*rdata = 0;
for (msb = 0; msb < 2; msb++) {
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1303,7 +1302,8 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
pwrap_writel(wrp, ((msb << 30) | (adr << 16)),
PWRAP_WACS2_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret)
return ret;
@@ -1318,14 +1318,16 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
- return wrp->slave->pwrap_read(wrp, adr, rdata);
+ return wrp->slave->regops->pwrap_read(wrp, adr, rdata);
}
static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
+ bool tmp;
int ret;
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1344,10 +1346,12 @@ static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
+ bool tmp;
int ret, msb, rdata;
for (msb = 0; msb < 2; msb++) {
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1373,7 +1377,7 @@ static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
- return wrp->slave->pwrap_write(wrp, adr, wdata);
+ return wrp->slave->regops->pwrap_write(wrp, adr, wdata);
}
static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata)
@@ -1388,6 +1392,7 @@ static int pwrap_regmap_write(void *context, u32 adr, u32 wdata)
static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
{
+ bool tmp;
int ret, i;
pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN);
@@ -1407,7 +1412,8 @@ static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
PWRAP_MAN_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle);
+ ret = readx_poll_timeout(pwrap_is_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
return ret;
@@ -1458,14 +1464,15 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
static int pwrap_init_dual_io(struct pmic_wrapper *wrp)
{
int ret;
+ bool tmp;
u32 rdata;
/* Enable dual IO mode */
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
/* Check IDLE & INIT_DONE in advance */
- ret = pwrap_wait_for_state(wrp,
- pwrap_is_fsm_idle_and_sync_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
return ret;
@@ -1570,6 +1577,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
static int pwrap_init_cipher(struct pmic_wrapper *wrp)
{
int ret;
+ bool tmp;
u32 rdata = 0;
pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
@@ -1624,14 +1632,16 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
}
/* wait for cipher data ready@AP */
- ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready);
+ ret = readx_poll_timeout(pwrap_is_cipher_ready, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret);
return ret;
}
/* wait for cipher data ready@PMIC */
- ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready);
+ ret = readx_poll_timeout(pwrap_is_pmic_cipher_ready, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev,
"timeout waiting for cipher data ready@PMIC\n");
@@ -1640,7 +1650,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
/* wait for cipher mode idle */
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret);
return ret;
@@ -1885,99 +1896,82 @@ static const struct regmap_config pwrap_regmap_config32 = {
.max_register = 0xffff,
};
+static const struct pwrap_slv_regops pwrap_regops16 = {
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+ .regmap = &pwrap_regmap_config16,
+};
+
+static const struct pwrap_slv_regops pwrap_regops32 = {
+ .pwrap_read = pwrap_read32,
+ .pwrap_write = pwrap_write32,
+ .regmap = &pwrap_regmap_config32,
+};
+
static const struct pwrap_slv_type pmic_mt6323 = {
.dew_regs = mt6323_regs,
.type = PMIC_MT6323,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
PWRAP_SLV_CAP_SECURITY,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6351 = {
.dew_regs = mt6351_regs,
.type = PMIC_MT6351,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = 0,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6357 = {
.dew_regs = mt6357_regs,
.type = PMIC_MT6357,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = 0,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6358 = {
.dew_regs = mt6358_regs,
.type = PMIC_MT6358,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6359 = {
.dew_regs = mt6359_regs,
.type = PMIC_MT6359,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_DUALIO,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6380 = {
.dew_regs = NULL,
.type = PMIC_MT6380,
- .regmap = &pwrap_regmap_config32,
+ .regops = &pwrap_regops32,
.caps = 0,
- .pwrap_read = pwrap_read32,
- .pwrap_write = pwrap_write32,
};
static const struct pwrap_slv_type pmic_mt6397 = {
.dew_regs = mt6397_regs,
.type = PMIC_MT6397,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
PWRAP_SLV_CAP_SECURITY,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct of_device_id of_slave_match_tbl[] = {
- {
- .compatible = "mediatek,mt6323",
- .data = &pmic_mt6323,
- }, {
- .compatible = "mediatek,mt6351",
- .data = &pmic_mt6351,
- }, {
- .compatible = "mediatek,mt6357",
- .data = &pmic_mt6357,
- }, {
- .compatible = "mediatek,mt6358",
- .data = &pmic_mt6358,
- }, {
- .compatible = "mediatek,mt6359",
- .data = &pmic_mt6359,
- }, {
- /* The MT6380 PMIC only implements a regulator, so we bind it
- * directly instead of using a MFD.
- */
- .compatible = "mediatek,mt6380-regulator",
- .data = &pmic_mt6380,
- }, {
- .compatible = "mediatek,mt6397",
- .data = &pmic_mt6397,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt6323", .data = &pmic_mt6323 },
+ { .compatible = "mediatek,mt6351", .data = &pmic_mt6351 },
+ { .compatible = "mediatek,mt6357", .data = &pmic_mt6357 },
+ { .compatible = "mediatek,mt6358", .data = &pmic_mt6358 },
+ { .compatible = "mediatek,mt6359", .data = &pmic_mt6359 },
+
+ /* The MT6380 PMIC only implements a regulator, so we bind it
+	 * directly instead of using an MFD.
+ */
+ { .compatible = "mediatek,mt6380-regulator", .data = &pmic_mt6380 },
+ { .compatible = "mediatek,mt6397", .data = &pmic_mt6397 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_slave_match_tbl);
@@ -2136,45 +2130,19 @@ static struct pmic_wrapper_type pwrap_mt8186 = {
};
static const struct of_device_id of_pwrap_match_tbl[] = {
- {
- .compatible = "mediatek,mt2701-pwrap",
- .data = &pwrap_mt2701,
- }, {
- .compatible = "mediatek,mt6765-pwrap",
- .data = &pwrap_mt6765,
- }, {
- .compatible = "mediatek,mt6779-pwrap",
- .data = &pwrap_mt6779,
- }, {
- .compatible = "mediatek,mt6797-pwrap",
- .data = &pwrap_mt6797,
- }, {
- .compatible = "mediatek,mt6873-pwrap",
- .data = &pwrap_mt6873,
- }, {
- .compatible = "mediatek,mt7622-pwrap",
- .data = &pwrap_mt7622,
- }, {
- .compatible = "mediatek,mt8135-pwrap",
- .data = &pwrap_mt8135,
- }, {
- .compatible = "mediatek,mt8173-pwrap",
- .data = &pwrap_mt8173,
- }, {
- .compatible = "mediatek,mt8183-pwrap",
- .data = &pwrap_mt8183,
- }, {
- .compatible = "mediatek,mt8186-pwrap",
- .data = &pwrap_mt8186,
- }, {
- .compatible = "mediatek,mt8195-pwrap",
- .data = &pwrap_mt8195,
- }, {
- .compatible = "mediatek,mt8516-pwrap",
- .data = &pwrap_mt8516,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt2701-pwrap", .data = &pwrap_mt2701 },
+ { .compatible = "mediatek,mt6765-pwrap", .data = &pwrap_mt6765 },
+ { .compatible = "mediatek,mt6779-pwrap", .data = &pwrap_mt6779 },
+ { .compatible = "mediatek,mt6797-pwrap", .data = &pwrap_mt6797 },
+ { .compatible = "mediatek,mt6873-pwrap", .data = &pwrap_mt6873 },
+ { .compatible = "mediatek,mt7622-pwrap", .data = &pwrap_mt7622 },
+ { .compatible = "mediatek,mt8135-pwrap", .data = &pwrap_mt8135 },
+ { .compatible = "mediatek,mt8173-pwrap", .data = &pwrap_mt8173 },
+ { .compatible = "mediatek,mt8183-pwrap", .data = &pwrap_mt8183 },
+ { .compatible = "mediatek,mt8186-pwrap", .data = &pwrap_mt8186 },
+ { .compatible = "mediatek,mt8195-pwrap", .data = &pwrap_mt8195 },
+ { .compatible = "mediatek,mt8516-pwrap", .data = &pwrap_mt8516 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
@@ -2185,7 +2153,6 @@ static int pwrap_probe(struct platform_device *pdev)
struct pmic_wrapper *wrp;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_slave_id = NULL;
- struct resource *res;
if (np->child)
of_slave_id = of_match_node(of_slave_match_tbl, np->child);
@@ -2205,8 +2172,7 @@ static int pwrap_probe(struct platform_device *pdev)
wrp->slave = of_slave_id->data;
wrp->dev = &pdev->dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap");
- wrp->base = devm_ioremap_resource(wrp->dev, res);
+ wrp->base = devm_platform_ioremap_resource_byname(pdev, "pwrap");
if (IS_ERR(wrp->base))
return PTR_ERR(wrp->base);
@@ -2220,9 +2186,7 @@ static int pwrap_probe(struct platform_device *pdev)
}
if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "pwrap-bridge");
- wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
+ wrp->bridge_base = devm_platform_ioremap_resource_byname(pdev, "pwrap-bridge");
if (IS_ERR(wrp->bridge_base))
return PTR_ERR(wrp->bridge_base);
@@ -2315,13 +2279,18 @@ static int pwrap_probe(struct platform_device *pdev)
pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_out2;
+ }
+
ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
IRQF_TRIGGER_HIGH,
"mt-pmic-pwrap", wrp);
if (ret)
goto err_out2;
- wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap);
+ wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap);
if (IS_ERR(wrp->regmap)) {
ret = PTR_ERR(wrp->regmap);
goto err_out2;
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
new file mode 100644
index 000000000000..dee8664a12fd
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -0,0 +1,2403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/thermal.h>
+
+/* svs bank 1-line software id */
+#define SVSB_CPU_LITTLE BIT(0)
+#define SVSB_CPU_BIG BIT(1)
+#define SVSB_CCI BIT(2)
+#define SVSB_GPU BIT(3)
+
+/* svs bank 2-line type */
+#define SVSB_LOW BIT(8)
+#define SVSB_HIGH BIT(9)
+
+/* svs bank mode support */
+#define SVSB_MODE_ALL_DISABLE 0
+#define SVSB_MODE_INIT01 BIT(1)
+#define SVSB_MODE_INIT02 BIT(2)
+#define SVSB_MODE_MON BIT(3)
+
+/* svs bank volt flags */
+#define SVSB_INIT01_PD_REQ BIT(0)
+#define SVSB_INIT01_VOLT_IGNORE BIT(1)
+#define SVSB_INIT01_VOLT_INC_ONLY BIT(2)
+#define SVSB_MON_VOLT_IGNORE BIT(16)
+#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24)
+
+/* svs bank register common configuration */
+#define SVSB_DET_MAX 0xffff
+#define SVSB_DET_WINDOW 0xa28
+#define SVSB_DTHI 0x1
+#define SVSB_DTLO 0xfe
+#define SVSB_EN_INIT01 0x1
+#define SVSB_EN_INIT02 0x5
+#define SVSB_EN_MON 0x2
+#define SVSB_EN_OFF 0x0
+#define SVSB_INTEN_INIT0x 0x00005f01
+#define SVSB_INTEN_MONVOPEN 0x00ff0000
+#define SVSB_INTSTS_CLEAN 0x00ffffff
+#define SVSB_INTSTS_COMPLETE 0x1
+#define SVSB_INTSTS_MONVOP 0x00ff0000
+#define SVSB_RUNCONFIG_DEFAULT 0x80000000
+
+/* svs bank related setting */
+#define BITS8 8
+#define MAX_OPP_ENTRIES 16
+#define REG_BYTES 4
+#define SVSB_DC_SIGNED_BIT BIT(15)
+#define SVSB_DET_CLK_EN BIT(31)
+#define SVSB_TEMP_LOWER_BOUND 0xb2
+#define SVSB_TEMP_UPPER_BOUND 0x64
+
+static DEFINE_SPINLOCK(svs_lock);
+
+#define debug_fops_ro(name) \
+ static int svs_##name##_debug_open(struct inode *inode, \
+ struct file *filp) \
+ { \
+ return single_open(filp, svs_##name##_debug_show, \
+ inode->i_private); \
+ } \
+ static const struct file_operations svs_##name##_debug_fops = { \
+ .owner = THIS_MODULE, \
+ .open = svs_##name##_debug_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define debug_fops_rw(name) \
+ static int svs_##name##_debug_open(struct inode *inode, \
+ struct file *filp) \
+ { \
+ return single_open(filp, svs_##name##_debug_show, \
+ inode->i_private); \
+ } \
+ static const struct file_operations svs_##name##_debug_fops = { \
+ .owner = THIS_MODULE, \
+ .open = svs_##name##_debug_open, \
+ .read = seq_read, \
+ .write = svs_##name##_debug_write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops}
+
+/**
+ * enum svsb_phase - svs bank phase enumeration
+ * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition
+ * @SVSB_PHASE_INIT01: svs bank basic init for data calibration
+ * @SVSB_PHASE_INIT02: svs bank can provide voltages to opp table
+ * @SVSB_PHASE_MON: svs bank can provide voltages with thermal effect
+ * @SVSB_PHASE_MAX: total number of svs bank phases (for debug purposes)
+ *
+ * Each svs bank has its own independent phase, and we enable each svs bank
+ * by running its phases in order. However, when an svs bank encounters an
+ * unexpected condition, it fires an irq (PHASE_ERROR) to inform the svs
+ * software.
+ *
+ * svs bank general phase-enabled order:
+ * SVSB_PHASE_INIT01 -> SVSB_PHASE_INIT02 -> SVSB_PHASE_MON
+ */
+enum svsb_phase {
+ SVSB_PHASE_ERROR = 0,
+ SVSB_PHASE_INIT01,
+ SVSB_PHASE_INIT02,
+ SVSB_PHASE_MON,
+ SVSB_PHASE_MAX,
+};
+
+enum svs_reg_index {
+ DESCHAR = 0,
+ TEMPCHAR,
+ DETCHAR,
+ AGECHAR,
+ DCCONFIG,
+ AGECONFIG,
+ FREQPCT30,
+ FREQPCT74,
+ LIMITVALS,
+ VBOOT,
+ DETWINDOW,
+ CONFIG,
+ TSCALCS,
+ RUNCONFIG,
+ SVSEN,
+ INIT2VALS,
+ DCVALUES,
+ AGEVALUES,
+ VOP30,
+ VOP74,
+ TEMP,
+ INTSTS,
+ INTSTSRAW,
+ INTEN,
+ CHKINT,
+ CHKSHIFT,
+ STATUS,
+ VDESIGN30,
+ VDESIGN74,
+ DVT30,
+ DVT74,
+ AGECOUNT,
+ SMSTATE0,
+ SMSTATE1,
+ CTL0,
+ DESDETSEC,
+ TEMPAGESEC,
+ CTRLSPARE0,
+ CTRLSPARE1,
+ CTRLSPARE2,
+ CTRLSPARE3,
+ CORESEL,
+ THERMINTST,
+ INTST,
+ THSTAGE0ST,
+ THSTAGE1ST,
+ THSTAGE2ST,
+ THAHBST0,
+ THAHBST1,
+ SPARE0,
+ SPARE1,
+ SPARE2,
+ SPARE3,
+ THSLPEVEB,
+ SVS_REG_MAX,
+};
+
+static const u32 svs_regs_v2[] = {
+ [DESCHAR] = 0xc00,
+ [TEMPCHAR] = 0xc04,
+ [DETCHAR] = 0xc08,
+ [AGECHAR] = 0xc0c,
+ [DCCONFIG] = 0xc10,
+ [AGECONFIG] = 0xc14,
+ [FREQPCT30] = 0xc18,
+ [FREQPCT74] = 0xc1c,
+ [LIMITVALS] = 0xc20,
+ [VBOOT] = 0xc24,
+ [DETWINDOW] = 0xc28,
+ [CONFIG] = 0xc2c,
+ [TSCALCS] = 0xc30,
+ [RUNCONFIG] = 0xc34,
+ [SVSEN] = 0xc38,
+ [INIT2VALS] = 0xc3c,
+ [DCVALUES] = 0xc40,
+ [AGEVALUES] = 0xc44,
+ [VOP30] = 0xc48,
+ [VOP74] = 0xc4c,
+ [TEMP] = 0xc50,
+ [INTSTS] = 0xc54,
+ [INTSTSRAW] = 0xc58,
+ [INTEN] = 0xc5c,
+ [CHKINT] = 0xc60,
+ [CHKSHIFT] = 0xc64,
+ [STATUS] = 0xc68,
+ [VDESIGN30] = 0xc6c,
+ [VDESIGN74] = 0xc70,
+ [DVT30] = 0xc74,
+ [DVT74] = 0xc78,
+ [AGECOUNT] = 0xc7c,
+ [SMSTATE0] = 0xc80,
+ [SMSTATE1] = 0xc84,
+ [CTL0] = 0xc88,
+ [DESDETSEC] = 0xce0,
+ [TEMPAGESEC] = 0xce4,
+ [CTRLSPARE0] = 0xcf0,
+ [CTRLSPARE1] = 0xcf4,
+ [CTRLSPARE2] = 0xcf8,
+ [CTRLSPARE3] = 0xcfc,
+ [CORESEL] = 0xf00,
+ [THERMINTST] = 0xf04,
+ [INTST] = 0xf08,
+ [THSTAGE0ST] = 0xf0c,
+ [THSTAGE1ST] = 0xf10,
+ [THSTAGE2ST] = 0xf14,
+ [THAHBST0] = 0xf18,
+ [THAHBST1] = 0xf1c,
+ [SPARE0] = 0xf20,
+ [SPARE1] = 0xf24,
+ [SPARE2] = 0xf28,
+ [SPARE3] = 0xf2c,
+ [THSLPEVEB] = 0xf30,
+};
+
+/**
+ * struct svs_platform - svs platform control
+ * @name: svs platform name
+ * @base: svs platform register base
+ * @dev: svs platform device
+ * @main_clk: main clock for svs bank
+ * @pbank: svs bank pointer that must only be accessed while holding svs_lock
+ * @banks: svs banks that svs platform supports
+ * @rst: svs platform reset control
+ * @efuse_parsing: svs platform efuse parsing function pointer
+ * @probe: svs platform probe function pointer
+ * @irqflags: svs platform irq settings flags
+ * @efuse_max: total number of svs efuse entries
+ * @tefuse_max: total number of thermal efuse entries
+ * @regs: svs platform registers map
+ * @bank_max: total number of svs banks
+ * @efuse: svs efuse data received from NVMEM framework
+ * @tefuse: thermal efuse data received from NVMEM framework
+ */
+struct svs_platform {
+ char *name;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *main_clk;
+ struct svs_bank *pbank;
+ struct svs_bank *banks;
+ struct reset_control *rst;
+ bool (*efuse_parsing)(struct svs_platform *svsp);
+ int (*probe)(struct svs_platform *svsp);
+ unsigned long irqflags;
+ size_t efuse_max;
+ size_t tefuse_max;
+ const u32 *regs;
+ u32 bank_max;
+ u32 *efuse;
+ u32 *tefuse;
+};
+
+struct svs_platform_data {
+ char *name;
+ struct svs_bank *banks;
+ bool (*efuse_parsing)(struct svs_platform *svsp);
+ int (*probe)(struct svs_platform *svsp);
+ unsigned long irqflags;
+ const u32 *regs;
+ u32 bank_max;
+};
+
+/**
+ * struct svs_bank - svs bank representation
+ * @dev: bank device
+ * @opp_dev: device for opp table/buck control
+ * @init_completion: the timeout completion for bank init
+ * @buck: regulator used by opp_dev
+ * @tzd: thermal zone device for getting temperature
+ * @lock: mutex lock to protect voltage update process
+ * @set_freq_pct: function pointer to set bank frequency percent table
+ * @get_volts: function pointer to get bank voltages
+ * @name: bank name
+ * @buck_name: regulator name
+ * @tzone_name: thermal zone name
+ * @phase: bank current phase
+ * @volt_od: bank voltage overdrive
+ * @reg_data: bank register data in different phases for debug purposes
+ * @pm_runtime_enabled_count: bank pm runtime enabled count
+ * @mode_support: bank mode support
+ * @freq_base: reference frequency for bank init
+ * @turn_freq_base: reference frequency for 2-line turn point
+ * @vboot: voltage request for bank init01 only
+ * @opp_dfreq: default opp frequency table
+ * @opp_dvolt: default opp voltage table
+ * @freq_pct: frequency percent table for bank init
+ * @volt: bank voltage table
+ * @volt_step: bank voltage step
+ * @volt_base: bank voltage base
+ * @volt_flags: bank voltage flags
+ * @vmax: bank voltage maximum
+ * @vmin: bank voltage minimum
+ * @age_config: bank age configuration
+ * @age_voffset_in: bank age voltage offset
+ * @dc_config: bank dc configuration
+ * @dc_voffset_in: bank dc voltage offset
+ * @dvt_fixed: bank dvt fixed value
+ * @vco: bank VCO value
+ * @chk_shift: bank chicken shift
+ * @core_sel: bank selection
+ * @opp_count: bank opp count
+ * @int_st: bank interrupt identification
+ * @sw_id: bank software identification
+ * @cpu_id: cpu core id for SVS CPU bank use only
+ * @ctl0: TS-x selection
+ * @temp: bank temperature
+ * @tzone_htemp: thermal zone high temperature threshold
+ * @tzone_htemp_voffset: thermal zone high temperature voltage offset
+ * @tzone_ltemp: thermal zone low temperature threshold
+ * @tzone_ltemp_voffset: thermal zone low temperature voltage offset
+ * @bts: svs efuse data
+ * @mts: svs efuse data
+ * @bdes: svs efuse data
+ * @mdes: svs efuse data
+ * @mtdes: svs efuse data
+ * @dcbdet: svs efuse data
+ * @dcmdet: svs efuse data
+ * @turn_pt: 2-line turn point telling which opp_volt is calculated by the high/low bank
+ * @type: bank type indicating a 2-line (high/low) bank or a 1-line bank
+ *
+ * Svs bank will generate suitable voltages by the general math equation
+ * below and provide these voltages to the opp voltage table.
+ *
+ * opp_volt[i] = (volt[i] * volt_step) + volt_base;
+ */
+struct svs_bank {
+ struct device *dev;
+ struct device *opp_dev;
+ struct completion init_completion;
+ struct regulator *buck;
+ struct thermal_zone_device *tzd;
+ struct mutex lock; /* lock to protect voltage update process */
+ void (*set_freq_pct)(struct svs_platform *svsp);
+ void (*get_volts)(struct svs_platform *svsp);
+ char *name;
+ char *buck_name;
+ char *tzone_name;
+ enum svsb_phase phase;
+ s32 volt_od;
+ u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX];
+ u32 pm_runtime_enabled_count;
+ u32 mode_support;
+ u32 freq_base;
+ u32 turn_freq_base;
+ u32 vboot;
+ u32 opp_dfreq[MAX_OPP_ENTRIES];
+ u32 opp_dvolt[MAX_OPP_ENTRIES];
+ u32 freq_pct[MAX_OPP_ENTRIES];
+ u32 volt[MAX_OPP_ENTRIES];
+ u32 volt_step;
+ u32 volt_base;
+ u32 volt_flags;
+ u32 vmax;
+ u32 vmin;
+ u32 age_config;
+ u32 age_voffset_in;
+ u32 dc_config;
+ u32 dc_voffset_in;
+ u32 dvt_fixed;
+ u32 vco;
+ u32 chk_shift;
+ u32 core_sel;
+ u32 opp_count;
+ u32 int_st;
+ u32 sw_id;
+ u32 cpu_id;
+ u32 ctl0;
+ u32 temp;
+ u32 tzone_htemp;
+ u32 tzone_htemp_voffset;
+ u32 tzone_ltemp;
+ u32 tzone_ltemp_voffset;
+ u32 bts;
+ u32 mts;
+ u32 bdes;
+ u32 mdes;
+ u32 mtdes;
+ u32 dcbdet;
+ u32 dcmdet;
+ u32 turn_pt;
+ u32 type;
+};
+
+static u32 percent(u32 numerator, u32 denominator)
+{
+ /* Divide by 1000 first; otherwise "numerator * 100" would overflow u32. */
+ numerator /= 1000;
+ denominator /= 1000;
+
+ return DIV_ROUND_UP(numerator * 100, denominator);
+}
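+
+/*
+ * Editor's worked example (illustrative, not part of the original patch):
+ * for a hypothetical 1196 MHz opp on a 1989 MHz freq_base,
+ *
+ *   percent(1196000000, 1989000000) = DIV_ROUND_UP(1196000 * 100, 1989000)
+ *                                   = 61
+ *
+ * Without the prior division by 1000, "1196000000 * 100" would exceed
+ * U32_MAX and overflow.
+ */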
+
+static u32 svs_readl_relaxed(struct svs_platform *svsp, enum svs_reg_index rg_i)
+{
+ return readl_relaxed(svsp->base + svsp->regs[rg_i]);
+}
+
+static void svs_writel_relaxed(struct svs_platform *svsp, u32 val,
+ enum svs_reg_index rg_i)
+{
+ writel_relaxed(val, svsp->base + svsp->regs[rg_i]);
+}
+
+static void svs_switch_bank(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_writel_relaxed(svsp, svsb->core_sel, CORESEL);
+}
+
+static u32 svs_bank_volt_to_opp_volt(u32 svsb_volt, u32 svsb_volt_step,
+ u32 svsb_volt_base)
+{
+ return (svsb_volt * svsb_volt_step) + svsb_volt_base;
+}
+
+static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step,
+ u32 svsb_volt_base)
+{
+ return (opp_u_volt - svsb_volt_base) / svsb_volt_step;
+}
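+
+/*
+ * Editor's worked example (illustrative, not part of the original patch):
+ * using the MT8183 bank settings defined later in this file
+ * (volt_step = 6250, volt_base = 500000),
+ *
+ *   svs_bank_volt_to_opp_volt(0x30, 6250, 500000)
+ *     = 48 * 6250 + 500000 = 800000 uV (0.8 V)
+ *   svs_opp_volt_to_bank_volt(800000, 6250, 500000)
+ *     = (800000 - 500000) / 6250 = 0x30
+ *
+ * so the two helpers are inverses of each other for in-range values.
+ */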
+
+static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
+{
+ struct dev_pm_opp *opp;
+ u32 i, opp_u_volt;
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
+ svsb->opp_dfreq[i],
+ true);
+ if (IS_ERR(opp)) {
+ dev_err(svsb->dev, "cannot find freq = %u (%ld)\n",
+ svsb->opp_dfreq[i], PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ opp_u_volt = dev_pm_opp_get_voltage(opp);
+ svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
+static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
+{
+ int ret = -EPERM, tzone_temp = 0;
+ u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop;
+
+ mutex_lock(&svsb->lock);
+
+ /*
+ * 2-line bank updates its corresponding opp volts.
+ * 1-line bank updates all opp volts.
+ */
+ if (svsb->type == SVSB_HIGH) {
+ opp_start = 0;
+ opp_stop = svsb->turn_pt;
+ } else if (svsb->type == SVSB_LOW) {
+ opp_start = svsb->turn_pt;
+ opp_stop = svsb->opp_count;
+ } else {
+ opp_start = 0;
+ opp_stop = svsb->opp_count;
+ }
+
+ /* Get thermal effect */
+ if (svsb->phase == SVSB_PHASE_MON) {
+ ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp);
+ if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND &&
+ svsb->temp < SVSB_TEMP_LOWER_BOUND)) {
+ dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n",
+ svsb->tzone_name, ret, svsb->temp);
+ svsb->phase = SVSB_PHASE_ERROR;
+ }
+
+ if (tzone_temp >= svsb->tzone_htemp)
+ temp_voffset += svsb->tzone_htemp_voffset;
+ else if (tzone_temp <= svsb->tzone_ltemp)
+ temp_voffset += svsb->tzone_ltemp_voffset;
+
+ /* A 2-line bank updates all opp volts when running mon mode */
+ if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ opp_start = 0;
+ opp_stop = svsb->opp_count;
+ }
+ }
+
+ /* vmin <= svsb_volt (opp_volt) <= default opp voltage */
+ for (i = opp_start; i < opp_stop; i++) {
+ switch (svsb->phase) {
+ case SVSB_PHASE_ERROR:
+ opp_volt = svsb->opp_dvolt[i];
+ break;
+ case SVSB_PHASE_INIT01:
+ /* do nothing */
+ goto unlock_mutex;
+ case SVSB_PHASE_INIT02:
+ svsb_volt = max(svsb->volt[i], svsb->vmin);
+ opp_volt = svs_bank_volt_to_opp_volt(svsb_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ break;
+ case SVSB_PHASE_MON:
+ svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin);
+ opp_volt = svs_bank_volt_to_opp_volt(svsb_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ break;
+ default:
+ dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase);
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ opp_volt = min(opp_volt, svsb->opp_dvolt[i]);
+ ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
+ svsb->opp_dfreq[i],
+ opp_volt, opp_volt,
+ svsb->opp_dvolt[i]);
+ if (ret) {
+ dev_err(svsb->dev, "set %uuV fail: %d\n",
+ opp_volt, ret);
+ goto unlock_mutex;
+ }
+ }
+
+unlock_mutex:
+ mutex_unlock(&svsb->lock);
+
+ return ret;
+}
+
+static int svs_dump_debug_show(struct seq_file *m, void *p)
+{
+ struct svs_platform *svsp = (struct svs_platform *)m->private;
+ struct svs_bank *svsb;
+ unsigned long svs_reg_addr;
+ u32 idx, i, j, bank_id;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse && svsp->efuse[i])
+ seq_printf(m, "M_HW_RES%d = 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ for (i = 0; i < svsp->tefuse_max; i++)
+ if (svsp->tefuse)
+ seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n",
+ i, svsp->tefuse[i]);
+
+ for (bank_id = 0, idx = 0; idx < svsp->bank_max; idx++, bank_id++) {
+ svsb = &svsp->banks[idx];
+
+ for (i = SVSB_PHASE_INIT01; i <= SVSB_PHASE_MON; i++) {
+ seq_printf(m, "Bank_number = %u\n", bank_id);
+
+ if (i == SVSB_PHASE_INIT01 || i == SVSB_PHASE_INIT02)
+ seq_printf(m, "mode = init%d\n", i);
+ else if (i == SVSB_PHASE_MON)
+ seq_puts(m, "mode = mon\n");
+ else
+ seq_puts(m, "mode = error\n");
+
+ for (j = DESCHAR; j < SVS_REG_MAX; j++) {
+ svs_reg_addr = (unsigned long)(svsp->base +
+ svsp->regs[j]);
+ seq_printf(m, "0x%08lx = 0x%08x\n",
+ svs_reg_addr, svsb->reg_data[i][j]);
+ }
+ }
+ }
+
+ return 0;
+}
+
+debug_fops_ro(dump);
+
+static int svs_enable_debug_show(struct seq_file *m, void *v)
+{
+ struct svs_bank *svsb = (struct svs_bank *)m->private;
+
+ switch (svsb->phase) {
+ case SVSB_PHASE_ERROR:
+ seq_puts(m, "disabled\n");
+ break;
+ case SVSB_PHASE_INIT01:
+ seq_puts(m, "init1\n");
+ break;
+ case SVSB_PHASE_INIT02:
+ seq_puts(m, "init2\n");
+ break;
+ case SVSB_PHASE_MON:
+ seq_puts(m, "mon mode\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static ssize_t svs_enable_debug_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct svs_bank *svsb = file_inode(filp)->i_private;
+ struct svs_platform *svsp = dev_get_drvdata(svsb->dev);
+ unsigned long flags;
+ int enabled, ret;
+ char *buf = NULL;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)memdup_user_nul(buffer, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = kstrtoint(buf, 10, &enabled);
+ if (ret) {
+ kfree(buf); /* don't leak the memdup_user_nul() buffer on error */
+ return ret;
+ }
+
+ if (!enabled) {
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svsb->mode_support = SVSB_MODE_ALL_DISABLE;
+ svs_switch_bank(svsp);
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_adjust_pm_opp_volts(svsb);
+ }
+
+ kfree(buf);
+
+ return count;
+}
+
+debug_fops_rw(enable);
+
+static int svs_status_debug_show(struct seq_file *m, void *v)
+{
+ struct svs_bank *svsb = (struct svs_bank *)m->private;
+ struct dev_pm_opp *opp;
+ int tzone_temp = 0, ret;
+ u32 i;
+
+ ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp);
+ if (ret)
+ seq_printf(m, "%s: temperature ignore, turn_pt = %u\n",
+ svsb->name, svsb->turn_pt);
+ else
+ seq_printf(m, "%s: temperature = %d, turn_pt = %u\n",
+ svsb->name, tzone_temp, svsb->turn_pt);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
+ svsb->opp_dfreq[i], true);
+ if (IS_ERR(opp)) {
+ seq_printf(m, "%s: cannot find freq = %u (%ld)\n",
+ svsb->name, svsb->opp_dfreq[i],
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ seq_printf(m, "opp_freq[%02u]: %u, opp_volt[%02u]: %lu, ",
+ i, svsb->opp_dfreq[i], i,
+ dev_pm_opp_get_voltage(opp));
+ seq_printf(m, "svsb_volt[%02u]: 0x%x, freq_pct[%02u]: %u\n",
+ i, svsb->volt[i], i, svsb->freq_pct[i]);
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
+debug_fops_ro(status);
+
+static int svs_create_debug_cmds(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct dentry *svs_dir, *svsb_dir, *file_entry;
+ const char *d = "/sys/kernel/debug/svs";
+ u32 i, idx;
+
+ struct svs_dentry {
+ const char *name;
+ const struct file_operations *fops;
+ };
+
+ struct svs_dentry svs_entries[] = {
+ svs_dentry_data(dump),
+ };
+
+ struct svs_dentry svsb_entries[] = {
+ svs_dentry_data(enable),
+ svs_dentry_data(status),
+ };
+
+ svs_dir = debugfs_create_dir("svs", NULL);
+ if (IS_ERR(svs_dir)) {
+ dev_err(svsp->dev, "cannot create %s: %ld\n",
+ d, PTR_ERR(svs_dir));
+ return PTR_ERR(svs_dir);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(svs_entries); i++) {
+ file_entry = debugfs_create_file(svs_entries[i].name, 0664,
+ svs_dir, svsp,
+ svs_entries[i].fops);
+ if (IS_ERR(file_entry)) {
+ dev_err(svsp->dev, "cannot create %s/%s: %ld\n",
+ d, svs_entries[i].name, PTR_ERR(file_entry));
+ return PTR_ERR(file_entry);
+ }
+ }
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (svsb->mode_support == SVSB_MODE_ALL_DISABLE)
+ continue;
+
+ svsb_dir = debugfs_create_dir(svsb->name, svs_dir);
+ if (IS_ERR(svsb_dir)) {
+ dev_err(svsp->dev, "cannot create %s/%s: %ld\n",
+ d, svsb->name, PTR_ERR(svsb_dir));
+ return PTR_ERR(svsb_dir);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(svsb_entries); i++) {
+ file_entry = debugfs_create_file(svsb_entries[i].name,
+ 0664, svsb_dir, svsb,
+ svsb_entries[i].fops);
+ if (IS_ERR(file_entry)) {
+ dev_err(svsp->dev, "no %s/%s/%s?: %ld\n",
+ d, svsb->name, svsb_entries[i].name,
+ PTR_ERR(file_entry));
+ return PTR_ERR(file_entry);
+ }
+ }
+ }
+
+ return 0;
+}
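+
+/*
+ * Editor's note (illustrative, not part of the original patch): with the
+ * entries registered above, the resulting debugfs layout is expected to be
+ *
+ *   /sys/kernel/debug/svs/dump
+ *   /sys/kernel/debug/svs/<bank name>/enable
+ *   /sys/kernel/debug/svs/<bank name>/status
+ *
+ * where <bank name> is e.g. "SVSB_CPU_LITTLE" or "SVSB_GPU_HIGH", and
+ * writing "0" to "enable" disables that bank (see svs_enable_debug_write()).
+ */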
+
+static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
+{
+ u32 vx;
+
+ if (v0 == v1 || f0 == f1)
+ return v0;
+
+ /* *100 to have decimal fraction factor */
+ vx = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx));
+
+ return DIV_ROUND_UP(vx, 100);
+}
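+
+/*
+ * Editor's worked example (illustrative, not part of the original patch):
+ * with (f0, v0) = (100, 0x40) and (f1, v1) = (60, 0x30),
+ *
+ *   interpolate(100, 60, 0x40, 0x30, 80)
+ *     = DIV_ROUND_UP(6400 - ((1600 / 40) * 20), 100) = 56 = 0x38
+ *
+ * i.e. the voltage linearly interpolated halfway between the two points.
+ */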
+
+static void svs_get_bank_volts_v3(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt;
+ u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0;
+ u32 middle_index = (svsb->opp_count / 2);
+
+ if (svsb->phase == SVSB_PHASE_MON &&
+ svsb->volt_flags & SVSB_MON_VOLT_IGNORE)
+ return;
+
+ vop74 = svs_readl_relaxed(svsp, VOP74);
+ vop30 = svs_readl_relaxed(svsp, VOP30);
+
+ /* Target is to set svsb->volt[] by algorithm */
+ if (turn_pt < middle_index) {
+ if (svsb->type == SVSB_HIGH) {
+ /* volt[0] ~ volt[turn_pt - 1] */
+ for (i = 0; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */
+ j = svsb->opp_count - 7;
+ svsb->volt[turn_pt] = vop30 & GENMASK(7, 0);
+ shift_byte++;
+ for (i = j; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+
+ /* volt[turn_pt + 1] ~ volt[j - 1] by interpolate */
+ for (i = turn_pt + 1; i < j; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[turn_pt],
+ svsb->freq_pct[j],
+ svsb->volt[turn_pt],
+ svsb->volt[j],
+ svsb->freq_pct[i]);
+ }
+ } else {
+ if (svsb->type == SVSB_HIGH) {
+ /* volt[0] + volt[j] ~ volt[turn_pt - 1] */
+ j = turn_pt - 7;
+ svsb->volt[0] = vop30 & GENMASK(7, 0);
+ shift_byte++;
+ for (i = j; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+
+ /* volt[1] ~ volt[j - 1] by interpolate */
+ for (i = 1; i < j; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[0],
+ svsb->freq_pct[j],
+ svsb->volt[0],
+ svsb->volt[j],
+ svsb->freq_pct[i]);
+ } else if (svsb->type == SVSB_LOW) {
+ /* volt[turn_pt] ~ volt[opp_count - 1] */
+ for (i = turn_pt; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+ }
+ }
+
+ if (svsb->type == SVSB_HIGH) {
+ opp_start = 0;
+ opp_stop = svsb->turn_pt;
+ } else if (svsb->type == SVSB_LOW) {
+ opp_start = svsb->turn_pt;
+ opp_stop = svsb->opp_count;
+ }
+
+ for (i = opp_start; i < opp_stop; i++)
+ if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT)
+ svsb->volt[i] -= svsb->dvt_fixed;
+}
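+
+/*
+ * Editor's note (illustrative, not part of the original patch): in the
+ * 2-line layout above, the high bank owns opp entries [0, turn_pt) and the
+ * low bank owns [turn_pt, opp_count). Each bank reads at most eight 8-bit
+ * voltages from VOP30/VOP74 and fills the remaining entries of its range
+ * with interpolate(), so svsb->volt[] ends up fully populated across the
+ * two banks.
+ */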
+
+static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0;
+ u32 b_sft, shift_byte = 0, turn_pt;
+ u32 middle_index = (svsb->opp_count / 2);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) {
+ svsb->turn_pt = i;
+ break;
+ }
+ }
+
+ turn_pt = svsb->turn_pt;
+
+ /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */
+ if (turn_pt < middle_index) {
+ if (svsb->type == SVSB_HIGH) {
+ /*
+ * If we don't handle this situation,
+ * SVSB_HIGH's FREQPCT74 / FREQPCT30 would stay at "0",
+ * which causes SVSB_LOW to work abnormally.
+ */
+ if (turn_pt == 0)
+ freq_pct30 = svsb->freq_pct[0];
+
+ /* freq_pct[0] ~ freq_pct[turn_pt - 1] */
+ for (i = 0; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /*
+ * freq_pct[turn_pt] +
+ * freq_pct[opp_count - 7] ~ freq_pct[opp_count -1]
+ */
+ freq_pct30 = svsb->freq_pct[turn_pt];
+ shift_byte++;
+ j = svsb->opp_count - 7;
+ for (i = j; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ }
+ } else {
+ if (svsb->type == SVSB_HIGH) {
+ /*
+ * freq_pct[0] +
+ * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1]
+ */
+ freq_pct30 = svsb->freq_pct[0];
+ shift_byte++;
+ j = turn_pt - 7;
+ for (i = j; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */
+ for (i = turn_pt; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ }
+ }
+
+ svs_writel_relaxed(svsp, freq_pct74, FREQPCT74);
+ svs_writel_relaxed(svsp, freq_pct30, FREQPCT30);
+}
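+
+/*
+ * Editor's worked example (illustrative, not part of the original patch):
+ * assume opp_count = 16 (middle_index = 8) and turn_pt = 3. The high bank
+ * packs freq_pct[0..2] into bytes 0..2 of FREQPCT30. The low bank packs
+ * freq_pct[3] into byte 0 of FREQPCT30, freq_pct[9..11] into bytes 1..3 of
+ * FREQPCT30 and freq_pct[12..15] into FREQPCT74 - eight entries in total,
+ * matching the eight bytes available in the two registers.
+ */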
+
+static void svs_get_bank_volts_v2(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 temp, i;
+
+ temp = svs_readl_relaxed(svsp, VOP74);
+ svsb->volt[14] = (temp >> 24) & GENMASK(7, 0);
+ svsb->volt[12] = (temp >> 16) & GENMASK(7, 0);
+ svsb->volt[10] = (temp >> 8) & GENMASK(7, 0);
+ svsb->volt[8] = (temp & GENMASK(7, 0));
+
+ temp = svs_readl_relaxed(svsp, VOP30);
+ svsb->volt[6] = (temp >> 24) & GENMASK(7, 0);
+ svsb->volt[4] = (temp >> 16) & GENMASK(7, 0);
+ svsb->volt[2] = (temp >> 8) & GENMASK(7, 0);
+ svsb->volt[0] = (temp & GENMASK(7, 0));
+
+ for (i = 0; i <= 12; i += 2)
+ svsb->volt[i + 1] = interpolate(svsb->freq_pct[i],
+ svsb->freq_pct[i + 2],
+ svsb->volt[i],
+ svsb->volt[i + 2],
+ svsb->freq_pct[i + 1]);
+
+ svsb->volt[15] = interpolate(svsb->freq_pct[12],
+ svsb->freq_pct[14],
+ svsb->volt[12],
+ svsb->volt[14],
+ svsb->freq_pct[15]);
+
+ for (i = 0; i < svsb->opp_count; i++)
+ svsb->volt[i] += svsb->volt_od;
+}
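+
+/*
+ * Editor's note (illustrative, not part of the original patch): the v2
+ * layout reads the eight even-indexed voltages volt[0..14] from
+ * VOP30/VOP74, one byte each, fills the odd-indexed entries with
+ * interpolate(), derives volt[15] from the volt[12]/volt[14] pair and
+ * finally applies volt_od to every entry.
+ */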
+
+static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_writel_relaxed(svsp,
+ (svsb->freq_pct[14] << 24) |
+ (svsb->freq_pct[12] << 16) |
+ (svsb->freq_pct[10] << 8) |
+ svsb->freq_pct[8],
+ FREQPCT74);
+
+ svs_writel_relaxed(svsp,
+ (svsb->freq_pct[6] << 24) |
+ (svsb->freq_pct[4] << 16) |
+ (svsb->freq_pct[2] << 8) |
+ svsb->freq_pct[0],
+ FREQPCT30);
+}
+
+static void svs_set_bank_phase(struct svs_platform *svsp,
+ enum svsb_phase target_phase)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs;
+
+ svs_switch_bank(svsp);
+
+ des_char = (svsb->bdes << 8) | svsb->mdes;
+ svs_writel_relaxed(svsp, des_char, DESCHAR);
+
+ temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed;
+ svs_writel_relaxed(svsp, temp_char, TEMPCHAR);
+
+ det_char = (svsb->dcbdet << 8) | svsb->dcmdet;
+ svs_writel_relaxed(svsp, det_char, DETCHAR);
+
+ svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG);
+ svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG);
+ svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG);
+
+ svsb->set_freq_pct(svsp);
+
+ limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) |
+ (SVSB_DTHI << 8) | SVSB_DTLO;
+ svs_writel_relaxed(svsp, limit_vals, LIMITVALS);
+
+ svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW);
+ svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG);
+ svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT);
+ svs_writel_relaxed(svsp, svsb->ctl0, CTL0);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+
+ switch (target_phase) {
+ case SVSB_PHASE_INIT01:
+ svs_writel_relaxed(svsp, svsb->vboot, VBOOT);
+ svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
+ svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN);
+ break;
+ case SVSB_PHASE_INIT02:
+ svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
+ init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in;
+ svs_writel_relaxed(svsp, init2vals, INIT2VALS);
+ svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN);
+ break;
+ case SVSB_PHASE_MON:
+ ts_calcs = (svsb->bts << 12) | svsb->mts;
+ svs_writel_relaxed(svsp, ts_calcs, TSCALCS);
+ svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN);
+ svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN);
+ break;
+ default:
+ dev_err(svsb->dev, "requested unknown target phase: %u\n",
+ target_phase);
+ break;
+ }
+}
+
+static inline void svs_save_bank_register_data(struct svs_platform *svsp,
+ enum svsb_phase phase)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ enum svs_reg_index rg_i;
+
+ for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++)
+ svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i);
+}
+
+static inline void svs_error_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, CORESEL));
+ dev_err(svsb->dev, "SVSEN = 0x%08x, INTSTS = 0x%08x\n",
+ svs_readl_relaxed(svsp, SVSEN),
+ svs_readl_relaxed(svsp, INTSTS));
+ dev_err(svsb->dev, "SMSTATE0 = 0x%08x, SMSTATE1 = 0x%08x\n",
+ svs_readl_relaxed(svsp, SMSTATE0),
+ svs_readl_relaxed(svsp, SMSTATE1));
+ dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+}
+
+static inline void svs_init01_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, VDESIGN74),
+ svs_readl_relaxed(svsp, VDESIGN30),
+ svs_readl_relaxed(svsp, DCVALUES));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01);
+
+ svsb->phase = SVSB_PHASE_INIT01;
+ svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) &
+ GENMASK(15, 0)) + 1;
+ if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE ||
+ (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT &&
+ svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY))
+ svsb->dc_voffset_in = 0;
+
+ svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) &
+ GENMASK(15, 0);
+
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+ svsb->core_sel &= ~SVSB_DET_CLK_EN;
+}
+
+static inline void svs_init02_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, VOP74),
+ svs_readl_relaxed(svsp, VOP30),
+ svs_readl_relaxed(svsp, DCVALUES));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02);
+
+ svsb->phase = SVSB_PHASE_INIT02;
+ svsb->get_volts(svsp);
+
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+}
+
+static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_MON);
+
+ svsb->phase = SVSB_PHASE_MON;
+ svsb->get_volts(svsp);
+
+ svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS);
+}
+
+static irqreturn_t svs_isr(int irq, void *data)
+{
+ struct svs_platform *svsp = data;
+ struct svs_bank *svsb = NULL;
+ unsigned long flags;
+ u32 idx, int_sts, svs_en;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name);
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+
+ /* Find out which svs bank fires interrupt */
+ if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) {
+ spin_unlock_irqrestore(&svs_lock, flags);
+ continue;
+ }
+
+ svs_switch_bank(svsp);
+ int_sts = svs_readl_relaxed(svsp, INTSTS);
+ svs_en = svs_readl_relaxed(svsp, SVSEN);
+
+ if (int_sts == SVSB_INTSTS_COMPLETE &&
+ svs_en == SVSB_EN_INIT01)
+ svs_init01_isr_handler(svsp);
+ else if (int_sts == SVSB_INTSTS_COMPLETE &&
+ svs_en == SVSB_EN_INIT02)
+ svs_init02_isr_handler(svsp);
+ else if (int_sts & SVSB_INTSTS_MONVOP)
+ svs_mon_mode_isr_handler(svsp);
+ else
+ svs_error_isr_handler(svsp);
+
+ spin_unlock_irqrestore(&svs_lock, flags);
+ break;
+ }
+
+ svs_adjust_pm_opp_volts(svsb);
+
+ if (svsb->phase == SVSB_PHASE_INIT01 ||
+ svsb->phase == SVSB_PHASE_INIT02)
+ complete(&svsb->init_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int svs_init01(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags, time_left;
+ bool search_done;
+ int ret = 0, r;
+ u32 opp_freq, opp_vboot, buck_volt, idx, i;
+
+ /* Keep CPUs' core power on for svs_init01 initialization */
+ cpuidle_pause_and_lock();
+
+ /* Svs bank init01 preparation - power enable */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ ret = regulator_enable(svsb->buck);
+ if (ret) {
+ dev_err(svsb->dev, "%s enable fail: %d\n",
+ svsb->buck_name, ret);
+ goto svs_init01_resume_cpuidle;
+ }
+
+ /* Some bucks don't support mode changes. Show a failure message only. */
+ ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST);
+ if (ret)
+ dev_notice(svsb->dev, "set fast mode fail: %d\n", ret);
+
+ if (svsb->volt_flags & SVSB_INIT01_PD_REQ) {
+ if (!pm_runtime_enabled(svsb->opp_dev)) {
+ pm_runtime_enable(svsb->opp_dev);
+ svsb->pm_runtime_enabled_count++;
+ }
+
+ ret = pm_runtime_get_sync(svsb->opp_dev);
+ if (ret < 0) {
+ dev_err(svsb->dev, "mtcmos on fail: %d\n", ret);
+ goto svs_init01_resume_cpuidle;
+ }
+ }
+ }
+
+ /*
+ * Svs bank init01 preparation - vboot voltage adjustment
+ * Sometimes two svs banks use the same buck. Therefore,
+ * we have to set each svs bank to the target voltage (vboot) first.
+ */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ /*
+ * Find the fastest freq that can run at vboot and
+ * pin to that freq until svs_init01 is done.
+ */
+ search_done = false;
+ opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
+ svsb->volt_step,
+ svsb->volt_base);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp_freq = svsb->opp_dfreq[i];
+ if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) {
+ ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
+ opp_freq,
+ opp_vboot,
+ opp_vboot,
+ opp_vboot);
+ if (ret) {
+ dev_err(svsb->dev,
+ "set opp %uuV vboot fail: %d\n",
+ opp_vboot, ret);
+ goto svs_init01_finish;
+ }
+
+ search_done = true;
+ } else {
+ ret = dev_pm_opp_disable(svsb->opp_dev,
+ svsb->opp_dfreq[i]);
+ if (ret) {
+ dev_err(svsb->dev,
+ "opp %uHz disable fail: %d\n",
+ svsb->opp_dfreq[i], ret);
+ goto svs_init01_finish;
+ }
+ }
+ }
+ }
+
+ /* Svs bank init01 begins */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
+ svsb->volt_step,
+ svsb->volt_base);
+
+ buck_volt = regulator_get_voltage(svsb->buck);
+ if (buck_volt != opp_vboot) {
+ dev_err(svsb->dev,
+ "buck voltage: %uuV, expected vboot: %uuV\n",
+ buck_volt, opp_vboot);
+ ret = -EPERM;
+ goto svs_init01_finish;
+ }
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_INIT01);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ time_left = wait_for_completion_timeout(&svsb->init_completion,
+ msecs_to_jiffies(5000));
+ if (!time_left) {
+ dev_err(svsb->dev, "init01 completion timeout\n");
+ ret = -EBUSY;
+ goto svs_init01_finish;
+ }
+ }
+
+svs_init01_finish:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ r = dev_pm_opp_enable(svsb->opp_dev,
+ svsb->opp_dfreq[i]);
+ if (r)
+ dev_err(svsb->dev, "opp %uHz enable fail: %d\n",
+ svsb->opp_dfreq[i], r);
+ }
+
+ if (svsb->volt_flags & SVSB_INIT01_PD_REQ) {
+ r = pm_runtime_put_sync(svsb->opp_dev);
+ if (r)
+ dev_err(svsb->dev, "mtcmos off fail: %d\n", r);
+
+ if (svsb->pm_runtime_enabled_count > 0) {
+ pm_runtime_disable(svsb->opp_dev);
+ svsb->pm_runtime_enabled_count--;
+ }
+ }
+
+ r = regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL);
+ if (r)
+ dev_notice(svsb->dev, "set normal mode fail: %d\n", r);
+
+ r = regulator_disable(svsb->buck);
+ if (r)
+ dev_err(svsb->dev, "%s disable fail: %d\n",
+ svsb->buck_name, r);
+ }
+
+svs_init01_resume_cpuidle:
+ cpuidle_resume_and_unlock();
+
+ return ret;
+}
+
+static int svs_init02(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags, time_left;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT02))
+ continue;
+
+ reinit_completion(&svsb->init_completion);
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_INIT02);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ time_left = wait_for_completion_timeout(&svsb->init_completion,
+ msecs_to_jiffies(5000));
+ if (!time_left) {
+ dev_err(svsb->dev, "init02 completion timeout\n");
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * A 2-line high/low bank updates only its corresponding opp voltages.
+ * Therefore, we sync voltages back from the opp table to keep the
+ * high/low bank voltages consistent.
+ */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT02))
+ continue;
+
+ if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ if (svs_sync_bank_volts_from_opp(svsb)) {
+ dev_err(svsb->dev, "sync volt fail\n");
+ return -EPERM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void svs_mon_mode(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_MON))
+ continue;
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_MON);
+ spin_unlock_irqrestore(&svs_lock, flags);
+ }
+}
+
+static int svs_start(struct svs_platform *svsp)
+{
+ int ret;
+
+ ret = svs_init01(svsp);
+ if (ret)
+ return ret;
+
+ ret = svs_init02(svsp);
+ if (ret)
+ return ret;
+
+ svs_mon_mode(svsp);
+
+ return 0;
+}
+
+static int svs_suspend(struct device *dev)
+{
+ struct svs_platform *svsp = dev_get_drvdata(dev);
+ struct svs_bank *svsb;
+ unsigned long flags;
+ int ret;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ /* This might wait for the svs_isr() processing to finish */
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_switch_bank(svsp);
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_adjust_pm_opp_volts(svsb);
+ }
+
+ ret = reset_control_assert(svsp->rst);
+ if (ret) {
+ dev_err(svsp->dev, "cannot assert reset %d\n", ret);
+ return ret;
+ }
+
+ clk_disable_unprepare(svsp->main_clk);
+
+ return 0;
+}
+
+static int svs_resume(struct device *dev)
+{
+ struct svs_platform *svsp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(svsp->main_clk);
+ if (ret) {
+ dev_err(svsp->dev, "cannot enable main_clk, disable svs\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(svsp->rst);
+ if (ret) {
+ dev_err(svsp->dev, "cannot deassert reset %d\n", ret);
+ goto out_of_resume;
+ }
+
+ ret = svs_init02(svsp);
+ if (ret)
+ goto out_of_resume;
+
+ svs_mon_mode(svsp);
+
+ return 0;
+
+out_of_resume:
+ clk_disable_unprepare(svsp->main_clk);
+ return ret;
+}
+
+static int svs_bank_resource_setup(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int count, ret;
+ u32 idx, i;
+
+ dev_set_drvdata(svsp->dev, svsp);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ svsb->name = "SVSB_CPU_LITTLE";
+ break;
+ case SVSB_CPU_BIG:
+ svsb->name = "SVSB_CPU_BIG";
+ break;
+ case SVSB_CCI:
+ svsb->name = "SVSB_CCI";
+ break;
+ case SVSB_GPU:
+ if (svsb->type == SVSB_HIGH)
+ svsb->name = "SVSB_GPU_HIGH";
+ else if (svsb->type == SVSB_LOW)
+ svsb->name = "SVSB_GPU_LOW";
+ else
+ svsb->name = "SVSB_GPU";
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return -EINVAL;
+ }
+
+ svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev),
+ GFP_KERNEL);
+ if (!svsb->dev)
+ return -ENOMEM;
+
+ ret = dev_set_name(svsb->dev, "%s", svsb->name);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(svsb->dev, svsp);
+
+ ret = dev_pm_opp_of_add_table(svsb->opp_dev);
+ if (ret) {
+ dev_err(svsb->dev, "add opp table fail: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&svsb->lock);
+ init_completion(&svsb->init_completion);
+
+ if (svsb->mode_support & SVSB_MODE_INIT01) {
+ svsb->buck = devm_regulator_get_optional(svsb->opp_dev,
+ svsb->buck_name);
+ if (IS_ERR(svsb->buck)) {
+ dev_err(svsb->dev, "cannot get \"%s-supply\"\n",
+ svsb->buck_name);
+ return PTR_ERR(svsb->buck);
+ }
+ }
+
+ if (svsb->mode_support & SVSB_MODE_MON) {
+ svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name);
+ if (IS_ERR(svsb->tzd)) {
+ dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
+ svsb->tzone_name);
+ return PTR_ERR(svsb->tzd);
+ }
+ }
+
+ count = dev_pm_opp_get_opp_count(svsb->opp_dev);
+ if (svsb->opp_count != count) {
+ dev_err(svsb->dev,
+ "opp_count not \"%u\" but get \"%d\"?\n",
+ svsb->opp_count, count);
+ return count;
+ }
+
+ for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) {
+ opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(svsb->dev, "cannot find freq = %ld\n",
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ svsb->opp_dfreq[i] = freq;
+ svsb->opp_dvolt[i] = dev_pm_opp_get_voltage(opp);
+ svsb->freq_pct[i] = percent(svsb->opp_dfreq[i],
+ svsb->freq_base);
+ dev_pm_opp_put(opp);
+ }
+ }
+
+ return 0;
+}
+
+static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct nvmem_cell *cell;
+ u32 idx, i, vmin, golden_temp;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse[i])
+ dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ if (!svsp->efuse[9]) {
+ dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n");
+ return false;
+ }
+
+ /* Svs efuse parsing */
+ vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (vmin == 0x1)
+ svsb->vmin = 0x1e;
+
+ if (svsb->type == SVSB_LOW) {
+ svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0);
+ svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0);
+ } else if (svsb->type == SVSB_HIGH) {
+ svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0);
+ svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0);
+ }
+
+ svsb->vmax += svsb->dvt_fixed;
+ }
+
+ /* Thermal efuse parsing */
+ cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
+ if (IS_ERR_OR_NULL(cell)) {
+ dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
+ PTR_ERR(cell));
+ return false;
+ }
+
+ svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
+ if (IS_ERR(svsp->tefuse)) {
+ dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
+ PTR_ERR(svsp->tefuse));
+ nvmem_cell_put(cell);
+ return false;
+ }
+
+ svsp->tefuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ for (i = 0; i < svsp->tefuse_max; i++)
+ if (svsp->tefuse[i] != 0)
+ break;
+
+ if (i == svsp->tefuse_max)
+ golden_temp = 50; /* All thermal efuse data are 0 */
+ else
+ golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mts = 500;
+ svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4;
+ }
+
+ return true;
+}
+
+static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct nvmem_cell *cell;
+ int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0;
+ int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t;
+ int o_slope, o_slope_sign, ts_id;
+ u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse[i])
+ dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ if (!svsp->efuse[2]) {
+ dev_notice(svsp->dev, "svs_efuse[2] = 0x0?\n");
+ return false;
+ }
+
+ /* Svs efuse parsing */
+ ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (ft_pgm <= 1)
+ svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE;
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ svsb->bdes = svsp->efuse[16] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 10;
+ else
+ svsb->volt_od += 2;
+ break;
+ case SVSB_CPU_BIG:
+ svsb->bdes = svsp->efuse[18] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 15;
+ else
+ svsb->volt_od += 12;
+ break;
+ case SVSB_CCI:
+ svsb->bdes = svsp->efuse[4] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 10;
+ else
+ svsb->volt_od += 2;
+ break;
+ case SVSB_GPU:
+ svsb->bdes = svsp->efuse[6] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0);
+
+ if (ft_pgm >= 2) {
+ svsb->freq_base = 800000000; /* 800MHz */
+ svsb->dvt_fixed = 2;
+ }
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return false;
+ }
+ }
+
+ /* Get thermal efuse by nvmem */
+ cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
+ if (IS_ERR(cell)) {
+ dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
+ PTR_ERR(cell));
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
+ if (IS_ERR(svsp->tefuse)) {
+ dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
+ PTR_ERR(svsp->tefuse));
+ nvmem_cell_put(cell);
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ svsp->tefuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ /* Thermal efuse parsing */
+ adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0);
+ adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0);
+
+ o_vtsmcu[0] = (svsp->tefuse[0] >> 17) & GENMASK(8, 0);
+ o_vtsmcu[1] = (svsp->tefuse[0] >> 8) & GENMASK(8, 0);
+ o_vtsmcu[2] = svsp->tefuse[1] & GENMASK(8, 0);
+ o_vtsmcu[3] = (svsp->tefuse[2] >> 23) & GENMASK(8, 0);
+ o_vtsmcu[4] = (svsp->tefuse[2] >> 5) & GENMASK(8, 0);
+ o_vtsabb = (svsp->tefuse[2] >> 14) & GENMASK(8, 0);
+
+ degc_cali = (svsp->tefuse[0] >> 1) & GENMASK(5, 0);
+ adc_cali_en_t = svsp->tefuse[0] & BIT(0);
+ o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0);
+
+ ts_id = (svsp->tefuse[1] >> 9) & BIT(0);
+ o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0);
+
+ if (adc_cali_en_t == 1) {
+ if (!ts_id)
+ o_slope = 0;
+
+ if (adc_ge_t < 265 || adc_ge_t > 758 ||
+ adc_oe_t < 265 || adc_oe_t > 758 ||
+ o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 ||
+ o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 ||
+ o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 ||
+ o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 ||
+ o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 ||
+ o_vtsabb < -8 || o_vtsabb > 484 ||
+ degc_cali < 1 || degc_cali > 63) {
+ dev_err(svsp->dev, "bad thermal efuse, no mon mode\n");
+ goto remove_mt8183_svsb_mon_mode;
+ }
+ } else {
+ dev_err(svsp->dev, "no thermal efuse, no mon mode\n");
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ ge = ((adc_ge_t - 512) * 10000) / 4096;
+ oe = (adc_oe_t - 512);
+ gain = (10000 + ge);
+
+ format[0] = (o_vtsmcu[0] + 3350 - oe);
+ format[1] = (o_vtsmcu[1] + 3350 - oe);
+ format[2] = (o_vtsmcu[2] + 3350 - oe);
+ format[3] = (o_vtsmcu[3] + 3350 - oe);
+ format[4] = (o_vtsmcu[4] + 3350 - oe);
+ format[5] = (o_vtsabb + 3350 - oe);
+
+ for (i = 0; i < 6; i++)
+ x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain;
+
+ temp0 = (10000 * 100000 / gain) * 15 / 18;
+
+ if (!o_slope_sign)
+ mts = (temp0 * 10) / (1534 + o_slope * 10);
+ else
+ mts = (temp0 * 10) / (1534 - o_slope * 10);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mts = mts;
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ tb_roomt = x_roomt[3];
+ break;
+ case SVSB_CPU_BIG:
+ tb_roomt = x_roomt[4];
+ break;
+ case SVSB_CCI:
+ tb_roomt = x_roomt[3];
+ break;
+ case SVSB_GPU:
+ tb_roomt = x_roomt[1];
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ temp0 = (degc_cali * 10 / 2);
+ temp1 = ((10000 * 100000 / 4096 / gain) *
+ oe + tb_roomt * 10) * 15 / 18;
+
+ if (!o_slope_sign)
+ temp2 = temp1 * 100 / (1534 + o_slope * 10);
+ else
+ temp2 = temp1 * 100 / (1534 - o_slope * 10);
+
+ svsb->bts = (temp0 + temp2 - 250) * 4 / 10;
+ }
+
+ return true;
+
+remove_mt8183_svsb_mon_mode:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mode_support &= ~SVSB_MODE_MON;
+ }
+
+ return true;
+}
+
+static bool svs_is_efuse_data_correct(struct svs_platform *svsp)
+{
+ struct nvmem_cell *cell;
+
+ /* Get svs efuse by nvmem */
+ cell = nvmem_cell_get(svsp->dev, "svs-calibration-data");
+ if (IS_ERR(cell)) {
+ dev_err(svsp->dev, "no \"svs-calibration-data\"? %ld\n",
+ PTR_ERR(cell));
+ return false;
+ }
+
+ svsp->efuse = nvmem_cell_read(cell, &svsp->efuse_max);
+ if (IS_ERR(svsp->efuse)) {
+ dev_err(svsp->dev, "cannot read svs efuse: %ld\n",
+ PTR_ERR(svsp->efuse));
+ nvmem_cell_put(cell);
+ return false;
+ }
+
+ svsp->efuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ return svsp->efuse_parsing(svsp);
+}
+
+static struct device *svs_get_subsys_device(struct svs_platform *svsp,
+ const char *node_name)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_find_node_by_name(NULL, node_name);
+ if (!np) {
+ dev_err(svsp->dev, "cannot find %s node\n", node_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ of_node_put(np);
+ dev_err(svsp->dev, "cannot find pdev by %s\n", node_name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ of_node_put(np);
+
+ return &pdev->dev;
+}
+
+static struct device *svs_add_device_link(struct svs_platform *svsp,
+ const char *node_name)
+{
+ struct device *dev;
+ struct device_link *sup_link;
+
+ if (!node_name) {
+ dev_err(svsp->dev, "node name cannot be null\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev = svs_get_subsys_device(svsp, node_name);
+ if (IS_ERR(dev))
+ return dev;
+
+ sup_link = device_link_add(svsp->dev, dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!sup_link) {
+ dev_err(svsp->dev, "sup_link is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return dev;
+}
+
+static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+{
+ struct device *dev;
+ struct svs_bank *svsb;
+ u32 idx;
+
+ svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
+ if (IS_ERR(svsp->rst))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst),
+ "cannot get svs reset control\n");
+
+ dev = svs_add_device_link(svsp, "lvts");
+ if (IS_ERR(dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ "failed to get lvts device\n");
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (svsb->type == SVSB_HIGH)
+ svsb->opp_dev = svs_add_device_link(svsp, "mali");
+ else if (svsb->type == SVSB_LOW)
+ svsb->opp_dev = svs_get_subsys_device(svsp, "mali");
+
+ if (IS_ERR(svsb->opp_dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ "failed to get OPP device for bank %d\n",
+ idx);
+ }
+
+ return 0;
+}
+
+static int svs_mt8183_platform_probe(struct svs_platform *svsp)
+{
+ struct device *dev;
+ struct svs_bank *svsb;
+ u32 idx;
+
+ dev = svs_add_device_link(svsp, "thermal");
+ if (IS_ERR(dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ "failed to get thermal device\n");
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ case SVSB_CPU_BIG:
+ svsb->opp_dev = get_cpu_device(svsb->cpu_id);
+ break;
+ case SVSB_CCI:
+ svsb->opp_dev = svs_add_device_link(svsp, "cci");
+ break;
+ case SVSB_GPU:
+ svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(svsb->opp_dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ "failed to get OPP device for bank %d\n",
+ idx);
+ }
+
+ return 0;
+}
+
+static struct svs_bank svs_mt8192_banks[] = {
+ {
+ .sw_id = SVSB_GPU,
+ .type = SVSB_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .mode_support = SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 688000000,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .vmax = 0x60,
+ .vmin = 0x1a,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .dvt_fixed = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .core_sel = 0x0fff0100,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ },
+ {
+ .sw_id = SVSB_GPU,
+ .type = SVSB_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu1",
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT |
+ SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 902000000,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .vmax = 0x60,
+ .vmin = 0x1a,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .dvt_fixed = 0x6,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .core_sel = 0x0fff0101,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ },
+};
+
+static struct svs_bank svs_mt8183_banks[] = {
+ {
+ .sw_id = SVSB_CPU_LITTLE,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 0,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1989000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x64,
+ .vmin = 0x18,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0000,
+ .int_st = BIT(0),
+ .ctl0 = 0x00010001,
+ },
+ {
+ .sw_id = SVSB_CPU_BIG,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 4,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1989000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x58,
+ .vmin = 0x10,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0001,
+ .int_st = BIT(1),
+ .ctl0 = 0x00000001,
+ },
+ {
+ .sw_id = SVSB_CCI,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1196000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x64,
+ .vmin = 0x18,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0002,
+ .int_st = BIT(2),
+ .ctl0 = 0x00100003,
+ },
+ {
+ .sw_id = SVSB_GPU,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "mali",
+ .tzone_name = "tzts2",
+ .volt_flags = SVSB_INIT01_PD_REQ |
+ SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 |
+ SVSB_MODE_MON,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 900000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x40,
+ .vmin = 0x14,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x3,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0003,
+ .int_st = BIT(3),
+ .ctl0 = 0x00050001,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 3,
+ },
+};
+
+static const struct svs_platform_data svs_mt8192_platform_data = {
+ .name = "mt8192-svs",
+ .banks = svs_mt8192_banks,
+ .efuse_parsing = svs_mt8192_efuse_parsing,
+ .probe = svs_mt8192_platform_probe,
+ .irqflags = IRQF_TRIGGER_HIGH,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8192_banks),
+};
+
+static const struct svs_platform_data svs_mt8183_platform_data = {
+ .name = "mt8183-svs",
+ .banks = svs_mt8183_banks,
+ .efuse_parsing = svs_mt8183_efuse_parsing,
+ .probe = svs_mt8183_platform_probe,
+ .irqflags = IRQF_TRIGGER_LOW,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8183_banks),
+};
+
+static const struct of_device_id svs_of_match[] = {
+ {
+ .compatible = "mediatek,mt8192-svs",
+ .data = &svs_mt8192_platform_data,
+ }, {
+ .compatible = "mediatek,mt8183-svs",
+ .data = &svs_mt8183_platform_data,
+ }, {
+ /* Sentinel */
+ },
+};
+
+static struct svs_platform *svs_platform_probe(struct platform_device *pdev)
+{
+ struct svs_platform *svsp;
+ const struct svs_platform_data *svsp_data;
+ int ret;
+
+ svsp_data = of_device_get_match_data(&pdev->dev);
+ if (!svsp_data) {
+ dev_err(&pdev->dev, "no svs platform data?\n");
+ return ERR_PTR(-EPERM);
+ }
+
+ svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL);
+ if (!svsp)
+ return ERR_PTR(-ENOMEM);
+
+ svsp->dev = &pdev->dev;
+ svsp->name = svsp_data->name;
+ svsp->banks = svsp_data->banks;
+ svsp->efuse_parsing = svsp_data->efuse_parsing;
+ svsp->probe = svsp_data->probe;
+ svsp->irqflags = svsp_data->irqflags;
+ svsp->regs = svsp_data->regs;
+ svsp->bank_max = svsp_data->bank_max;
+
+ ret = svsp->probe(svsp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return svsp;
+}
+
+static int svs_probe(struct platform_device *pdev)
+{
+ struct svs_platform *svsp;
+ unsigned int svsp_irq;
+ int ret;
+
+ svsp = svs_platform_probe(pdev);
+ if (IS_ERR(svsp))
+ return PTR_ERR(svsp);
+
+ if (!svs_is_efuse_data_correct(svsp)) {
+ dev_notice(svsp->dev, "efuse data isn't correct\n");
+ ret = -EPERM;
+ goto svs_probe_free_resource;
+ }
+
+ ret = svs_bank_resource_setup(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret);
+ goto svs_probe_free_resource;
+ }
+
+ svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0);
+ ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
+ svsp->irqflags | IRQF_ONESHOT,
+ svsp->name, svsp);
+ if (ret) {
+ dev_err(svsp->dev, "register irq(%d) failed: %d\n",
+ svsp_irq, ret);
+ goto svs_probe_free_resource;
+ }
+
+ svsp->main_clk = devm_clk_get(svsp->dev, "main");
+ if (IS_ERR(svsp->main_clk)) {
+ dev_err(svsp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(svsp->main_clk));
+ ret = PTR_ERR(svsp->main_clk);
+ goto svs_probe_free_resource;
+ }
+
+ ret = clk_prepare_enable(svsp->main_clk);
+ if (ret) {
+ dev_err(svsp->dev, "cannot enable main clk: %d\n", ret);
+ goto svs_probe_free_resource;
+ }
+
+ svsp->base = of_iomap(svsp->dev->of_node, 0);
+ if (IS_ERR_OR_NULL(svsp->base)) {
+ dev_err(svsp->dev, "cannot find svs register base\n");
+ ret = -EINVAL;
+ goto svs_probe_clk_disable;
+ }
+
+ ret = svs_start(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs start fail: %d\n", ret);
+ goto svs_probe_iounmap;
+ }
+
+ ret = svs_create_debug_cmds(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret);
+ goto svs_probe_iounmap;
+ }
+
+ return 0;
+
+svs_probe_iounmap:
+ iounmap(svsp->base);
+
+svs_probe_clk_disable:
+ clk_disable_unprepare(svsp->main_clk);
+
+svs_probe_free_resource:
+ if (!IS_ERR_OR_NULL(svsp->efuse))
+ kfree(svsp->efuse);
+ if (!IS_ERR_OR_NULL(svsp->tefuse))
+ kfree(svsp->tefuse);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(svs_pm_ops, svs_suspend, svs_resume);
+
+static struct platform_driver svs_driver = {
+ .probe = svs_probe,
+ .driver = {
+ .name = "mtk-svs",
+ .pm = &svs_pm_ops,
+ .of_match_table = of_match_ptr(svs_of_match),
+ },
+};
+
+module_platform_driver(svs_driver);
+
+MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SVS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index e718b8735444..e0d7a5459562 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -129,7 +129,10 @@ config QCOM_RPMHPD
config QCOM_RPMPD
tristate "Qualcomm RPM Power domain driver"
+ depends on PM
depends on QCOM_SMD_RPM
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
help
QCOM RPM Power domain driver to support power-domains with
performance states. The driver communicates a performance state
@@ -228,4 +231,19 @@ config QCOM_APR
application processor and QDSP6. APR is
used by audio driver to configure QDSP6
ASM, ADM and AFE modules.
+
+config QCOM_ICC_BWMON
+ tristate "QCOM Interconnect Bandwidth Monitor driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select PM_OPP
+ help
+ Sets up a driver that monitors bandwidth on various interconnects and,
+ based on the measurements, votes for interconnect bandwidth, adjusting
+ interconnect speed to the current demand.
+ The current implementation supports BWMON v4, used for example on
+ SDM845 to measure bandwidth between the CPU (gladiator_noc) and the
+ Last Level Cache (memnoc). Using this BWMON allows some of the fixed
+ bandwidth votes to be removed from cpufreq (CPU nodes), achieving high
+ memory throughput even at lower CPU frequencies.
+
endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 70d5de69fd7b..d66604aff2b0 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
+obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 3caabd873322..b4046f393575 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -377,17 +377,14 @@ static int apr_device_probe(struct device *dev)
static void apr_device_remove(struct device *dev)
{
struct apr_device *adev = to_apr_device(dev);
- struct apr_driver *adrv;
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
- if (dev->driver) {
- adrv = to_apr_driver(dev->driver);
- if (adrv->remove)
- adrv->remove(adev);
- spin_lock(&apr->svcs_lock);
- idr_remove(&apr->svcs_idr, adev->svc.id);
- spin_unlock(&apr->svcs_lock);
- }
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+ idr_remove(&apr->svcs_idr, adev->svc.id);
+ spin_unlock(&apr->svcs_lock);
}
static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index dd872017f345..629a7188b576 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -141,13 +141,17 @@ static int cmd_db_get_header(const char *id, const struct entry_header **eh,
const struct rsc_hdr *rsc_hdr;
const struct entry_header *ent;
int ret, i, j;
- u8 query[8];
+ u8 query[sizeof(ent->id)] __nonstring;
ret = cmd_db_ready();
if (ret)
return ret;
- /* Pad out query string to same length as in DB */
+ /*
+ * Pad out query string to same length as in DB. NOTE: the output
+ * query string is not necessarily '\0' terminated if it bumps up
+ * against the max size. That's OK and expected.
+ */
strncpy(query, id, sizeof(query));
for (i = 0; i < MAX_SLV_ID; i++) {
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
new file mode 100644
index 000000000000..7f8aca533cd3
--- /dev/null
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2021-2022 Linaro Ltd
+ * Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on
+ * previous work of Thara Gopinath and msm-4.9 downstream sources.
+ */
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/sizes.h>
+
+/*
+ * The BWMON samples data throughput within a 'sample_ms' window. Its three
+ * configurable thresholds (Low, Medium and High) divide the measured
+ * bandwidth into four windows (called zones):
+ *
+ * Zone 0: byte count < THRES_LO
+ * Zone 1: THRES_LO < byte count < THRES_MED
+ * Zone 2: THRES_MED < byte count < THRES_HIGH
+ * Zone 3: THRES_HIGH < byte count
+ *
+ * Zones 0 and 2 are not used by this driver.
+ */
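As a concrete illustration of the zone split above, the classification the hardware performs on each sampling window can be sketched in C. This helper is illustrative only and not part of the patch; the function name and parameters are hypothetical stand-ins for the values programmed into BWMON_THRESHOLD_LOW/MED/HIGH, and behaviour exactly at a threshold value is approximated.

    /* Illustrative sketch: map a per-window count to its BWMON zone. */
    static unsigned int bwmon_zone_for_count(u32 count, u32 thres_lo,
                                             u32 thres_med, u32 thres_high)
    {
            if (count < thres_lo)
                    return 0;       /* below all thresholds */
            if (count < thres_med)
                    return 1;       /* between LOW and MED */
            if (count < thres_high)
                    return 2;       /* between MED and HIGH */
            return 3;               /* above HIGH */
    }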
+
+/* Internal sampling clock frequency */
+#define HW_TIMER_HZ 19200000
+
+#define BWMON_GLOBAL_IRQ_STATUS 0x0
+#define BWMON_GLOBAL_IRQ_CLEAR 0x8
+#define BWMON_GLOBAL_IRQ_ENABLE 0xc
+#define BWMON_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
+
+#define BWMON_IRQ_STATUS 0x100
+#define BWMON_IRQ_STATUS_ZONE_SHIFT 4
+#define BWMON_IRQ_CLEAR 0x108
+#define BWMON_IRQ_ENABLE 0x10c
+#define BWMON_IRQ_ENABLE_ZONE1_SHIFT 5
+#define BWMON_IRQ_ENABLE_ZONE2_SHIFT 6
+#define BWMON_IRQ_ENABLE_ZONE3_SHIFT 7
+#define BWMON_IRQ_ENABLE_MASK (BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT) | \
+ BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT))
+
+#define BWMON_ENABLE 0x2a0
+#define BWMON_ENABLE_ENABLE BIT(0)
+
+#define BWMON_CLEAR 0x2a4
+#define BWMON_CLEAR_CLEAR BIT(0)
+
+#define BWMON_SAMPLE_WINDOW 0x2a8
+#define BWMON_THRESHOLD_HIGH 0x2ac
+#define BWMON_THRESHOLD_MED 0x2b0
+#define BWMON_THRESHOLD_LOW 0x2b4
+
+#define BWMON_ZONE_ACTIONS 0x2b8
+/*
+ * Action to perform on zone 'z' when the current zone hits its threshold:
+ * increment the counter of zone 'z'.
+ */
+#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2))
+/* Clear counter of zone 'z' */
+#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2))
+
+/* Zone 0 threshold hit: Clear zone count */
+#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 1 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 2 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 3 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \
+ BWMON_ZONE_ACTIONS_CLEAR(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+/* Value for BWMON_ZONE_ACTIONS */
+#define BWMON_ZONE_ACTIONS_DEFAULT (BWMON_ZONE_ACTIONS_ZONE0 | \
+ BWMON_ZONE_ACTIONS_ZONE1 << 8 | \
+ BWMON_ZONE_ACTIONS_ZONE2 << 16 | \
+ BWMON_ZONE_ACTIONS_ZONE3 << 24)
+
+/*
+ * There is no clear documentation/explanation of the BWMON_THRESHOLD_COUNT
+ * register. Based on observations, it holds the number of times a threshold
+ * has to be reached before an interrupt is triggered in the given zone.
+ *
+ * 0xff is the maximum value and is used here to effectively ignore zones 0 and 2.
+ */
+#define BWMON_THRESHOLD_COUNT 0x2bc
+#define BWMON_THRESHOLD_COUNT_ZONE1_SHIFT 8
+#define BWMON_THRESHOLD_COUNT_ZONE2_SHIFT 16
+#define BWMON_THRESHOLD_COUNT_ZONE3_SHIFT 24
+#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff
+#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff
+
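As a worked example of how this register is packed, using the msm8998_bwmon_data defaults defined at the end of this file (zone1_thres_count = 16, zone3_thres_count = 1), bwmon_start() writes:

    (1 << 24) | (0xff << 16) | (16 << 8) | 0xff == 0x01ff10ff

so zones 0 and 2 are parked at the maximum count of 0xff, zone 1 needs 16 threshold hits and zone 3 only one before its interrupt fires.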
+/* BWMONv4 count registers use a count unit of 64 kB */
+#define BWMON_COUNT_UNIT_KB 64
+#define BWMON_ZONE_COUNT 0x2d8
+#define BWMON_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+
+struct icc_bwmon_data {
+ unsigned int sample_ms;
+ unsigned int default_highbw_kbps;
+ unsigned int default_medbw_kbps;
+ unsigned int default_lowbw_kbps;
+ u8 zone1_thres_count;
+ u8 zone3_thres_count;
+};
+
+struct icc_bwmon {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+
+ unsigned int default_lowbw_kbps;
+ unsigned int sample_ms;
+ unsigned int max_bw_kbps;
+ unsigned int min_bw_kbps;
+ unsigned int target_kbps;
+ unsigned int current_kbps;
+};
+
+static void bwmon_clear_counters(struct icc_bwmon *bwmon)
+{
+ /*
+ * Clear counters. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * The counter clear and IRQ clear bits are not in the same 4KB
+ * region. So, we need to make sure the counter clear is completed
+ * before we try to clear the IRQ or do any other counter operations.
+ */
+ writel(BWMON_CLEAR_CLEAR, bwmon->base + BWMON_CLEAR);
+}
+
+static void bwmon_clear_irq(struct icc_bwmon *bwmon)
+{
+ /*
+ * Clear zone and global interrupts. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * Synchronize the local interrupt clear in mon_irq_clear()
+ * with the global interrupt clear here. Otherwise, the CPU
+ * may reorder the two writes and clear the global interrupt
+ * before the local interrupt, causing the global interrupt
+ * to be retriggered by the local interrupt still being high.
+ *
+ * Similarly, because the global registers are in a different
+ * region than the local registers, we need to ensure any register
+ * writes to enable the monitor after this call are ordered with the
+ * clearing here so that local writes don't happen before the
+ * interrupt is cleared.
+ */
+ writel(BWMON_IRQ_ENABLE_MASK, bwmon->base + BWMON_IRQ_CLEAR);
+ writel(BIT(0), bwmon->base + BWMON_GLOBAL_IRQ_CLEAR);
+}
+
+static void bwmon_disable(struct icc_bwmon *bwmon)
+{
+ /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */
+ writel(0x0, bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
+ writel(0x0, bwmon->base + BWMON_IRQ_ENABLE);
+
+ /*
+ * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious
+ * IRQ.
+ */
+ writel(0x0, bwmon->base + BWMON_ENABLE);
+}
+
+static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable)
+{
+ /* Enable interrupts */
+ writel(BWMON_GLOBAL_IRQ_ENABLE_ENABLE,
+ bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
+ writel(irq_enable, bwmon->base + BWMON_IRQ_ENABLE);
+
+ /* Enable bwmon */
+ writel(BWMON_ENABLE_ENABLE, bwmon->base + BWMON_ENABLE);
+}
+
+static unsigned int bwmon_kbps_to_count(unsigned int kbps)
+{
+ return kbps / BWMON_COUNT_UNIT_KB;
+}
+
+static void bwmon_set_threshold(struct icc_bwmon *bwmon, unsigned int reg,
+ unsigned int kbps)
+{
+ unsigned int thres;
+
+ thres = mult_frac(bwmon_kbps_to_count(kbps), bwmon->sample_ms,
+ MSEC_PER_SEC);
+ writel_relaxed(thres, bwmon->base + reg);
+}
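A worked example of this scaling, using the msm8998 defaults defined at the end of this file: with sample_ms = 4 and default_highbw_kbps = 4800 * 1024 = 4915200 kB/s, bwmon_kbps_to_count() gives 4915200 / 64 = 76800 count units per second, and mult_frac(76800, 4, 1000) = 307 count units per 4 ms sampling window, which is the value written to BWMON_THRESHOLD_HIGH.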
+
+static void bwmon_start(struct icc_bwmon *bwmon,
+ const struct icc_bwmon_data *data)
+{
+ unsigned int thres_count;
+ int window;
+
+ bwmon_clear_counters(bwmon);
+
+ window = mult_frac(bwmon->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
+ /* Maximum sampling window: 0xfffff */
+ writel_relaxed(window, bwmon->base + BWMON_SAMPLE_WINDOW);
+
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH,
+ data->default_highbw_kbps);
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED,
+ data->default_medbw_kbps);
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_LOW,
+ data->default_lowbw_kbps);
+
+ thres_count = data->zone3_thres_count << BWMON_THRESHOLD_COUNT_ZONE3_SHIFT |
+ BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT << BWMON_THRESHOLD_COUNT_ZONE2_SHIFT |
+ data->zone1_thres_count << BWMON_THRESHOLD_COUNT_ZONE1_SHIFT |
+ BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT;
+ writel_relaxed(thres_count, bwmon->base + BWMON_THRESHOLD_COUNT);
+ writel_relaxed(BWMON_ZONE_ACTIONS_DEFAULT,
+ bwmon->base + BWMON_ZONE_ACTIONS);
+ /* Write barriers in bwmon_clear_irq() */
+
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK);
+}
+
+static irqreturn_t bwmon_intr(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int status, max;
+ int zone;
+
+ status = readl(bwmon->base + BWMON_IRQ_STATUS);
+ status &= BWMON_IRQ_ENABLE_MASK;
+ if (!status) {
+ /*
+ * Only the zone 1 and zone 3 interrupts are enabled, but the
+ * zone 2 threshold can still be hit and trigger an interrupt
+ * even though it is not enabled.
+ * Such a spurious interrupt might or might not carry a useful
+ * max count, so the robust solution would be to always check
+ * all BWMON_ZONE_MAX() registers and pick the highest value.
+ * That case is currently ignored.
+ */
+ return IRQ_NONE;
+ }
+
+ bwmon_disable(bwmon);
+
+ zone = get_bitmask_order(status >> BWMON_IRQ_STATUS_ZONE_SHIFT) - 1;
+ /*
+ * The zone max bytes count register returns count units within the
+ * sampling window. The downstream kernel for BWMONv4 (called BWMON
+ * type 2 downstream) always increments the max bytes count by one.
+ */
+ max = readl(bwmon->base + BWMON_ZONE_MAX(zone)) + 1;
+ max *= BWMON_COUNT_UNIT_KB;
+ bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->sample_ms);
+
+ return IRQ_WAKE_THREAD;
+}
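To illustrate the decode above with the same msm8998 defaults: if the zone 3 interrupt fired, status & BWMON_IRQ_ENABLE_MASK leaves BIT(7) set, status >> 4 is 0b1000, get_bitmask_order() returns 4 and zone becomes 3. If BWMON_ZONE_MAX(3) then reads a hypothetical 310, max = (310 + 1) * 64 = 19904 kB per 4 ms window and target_kbps = mult_frac(19904, 1000, 4) = 4976000 kB/s, i.e. just above the 4.8 GB/s high threshold that was crossed.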
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int irq_enable = 0;
+ struct dev_pm_opp *opp, *target_opp;
+ unsigned int bw_kbps, up_kbps, down_kbps;
+
+ bw_kbps = bwmon->target_kbps;
+
+ target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
+ target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+
+ bwmon->target_kbps = bw_kbps;
+
+ bw_kbps--;
+ opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ down_kbps = bwmon->target_kbps;
+ else
+ down_kbps = bw_kbps;
+
+ up_kbps = bwmon->target_kbps + 1;
+
+ if (bwmon->target_kbps >= bwmon->max_bw_kbps)
+ irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT);
+ else if (bwmon->target_kbps <= bwmon->min_bw_kbps)
+ irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT);
+ else
+ irq_enable = BWMON_IRQ_ENABLE_MASK;
+
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, up_kbps);
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, down_kbps);
+ /* Write barriers in bwmon_clear_counters() */
+ bwmon_clear_counters(bwmon);
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, irq_enable);
+
+ if (bwmon->target_kbps == bwmon->current_kbps)
+ goto out;
+
+ dev_pm_opp_set_opp(bwmon->dev, target_opp);
+ bwmon->current_kbps = bwmon->target_kbps;
+
+out:
+ dev_pm_opp_put(target_opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ return IRQ_HANDLED;
+}
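The net effect of the two OPP lookups above is to bracket the measured bandwidth between two OPP levels: BWMON_THRESHOLD_HIGH is set one kB/s above the selected OPP and BWMON_THRESHOLD_MED is set to the next lower OPP (or to the selected OPP itself when no lower one exists). For example, with a hypothetical OPP table of 1720000, 2086000 and 2929000 kB/s and a measured target of 1900000 kB/s, dev_pm_opp_find_bw_ceil() selects 2086000, the floor lookup at 2085999 returns 1720000, so up_kbps = 2086001 and down_kbps = 1720000; the monitor then interrupts again only when traffic leaves that band.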
+
+static int bwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
+ struct icc_bwmon *bwmon;
+ const struct icc_bwmon_data *data;
+ int ret;
+
+ bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL);
+ if (!bwmon)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(dev);
+
+ bwmon->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bwmon->base)) {
+ dev_err(dev, "failed to map bwmon registers\n");
+ return PTR_ERR(bwmon->base);
+ }
+
+ bwmon->irq = platform_get_irq(pdev, 0);
+ if (bwmon->irq < 0)
+ return bwmon->irq;
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
+
+ bwmon->max_bw_kbps = UINT_MAX;
+ opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n");
+
+ bwmon->min_bw_kbps = 0;
+ opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n");
+
+ bwmon->sample_ms = data->sample_ms;
+ bwmon->default_lowbw_kbps = data->default_lowbw_kbps;
+ bwmon->dev = dev;
+
+ bwmon_disable(bwmon);
+ ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr,
+ bwmon_intr_thread,
+ IRQF_ONESHOT, dev_name(dev), bwmon);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ\n");
+
+ platform_set_drvdata(pdev, bwmon);
+ bwmon_start(bwmon, data);
+
+ return 0;
+}
+
+static int bwmon_remove(struct platform_device *pdev)
+{
+ struct icc_bwmon *bwmon = platform_get_drvdata(pdev);
+
+ bwmon_disable(bwmon);
+
+ return 0;
+}
+
+/* BWMON v4 */
+static const struct icc_bwmon_data msm8998_bwmon_data = {
+ .sample_ms = 4,
+ .default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
+ .default_medbw_kbps = 512 * 1024, /* 512 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+};
+
+static const struct of_device_id bwmon_of_match[] = {
+ { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bwmon_of_match);
+
+static struct platform_driver bwmon_driver = {
+ .probe = bwmon_probe,
+ .remove = bwmon_remove,
+ .driver = {
+ .name = "qcom-bwmon",
+ .of_match_table = bwmon_of_match,
+ },
+};
+module_platform_driver(bwmon_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("QCOM BWMON driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 4b143cf7b4ce..38d7296315a2 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -382,7 +382,7 @@ static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
* llcc_slice_getd - get llcc slice descriptor
* @uid: usecase_id for the client
*
- * A pointer to llcc slice descriptor will be returned on success and
+ * A pointer to llcc slice descriptor will be returned on success
* and error pointer is returned on failure
*/
struct llcc_slice_desc *llcc_slice_getd(u32 uid)
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 366db493579b..3f11554df2f3 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -108,6 +108,8 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
* qcom_mdt_read_metadata() - read header and metadata from mdt or mbn
* @fw: firmware of mdt header or mbn
* @data_len: length of the read metadata blob
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @dev: device handle to associate resources with
*
* The mechanism that performs the authentication of the loading firmware
* expects an ELF header directly followed by the segment of hashes, with no
@@ -192,7 +194,7 @@ EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata);
* qcom_mdt_pas_init() - initialize PAS region for firmware loading
* @dev: device handle to associate resources with
* @fw: firmware object for the mdt file
- * @firmware: name of the firmware, for construction of segment file names
+ * @fw_name: name of the firmware, for construction of segment file names
* @pas_id: PAS identifier
* @mem_phys: physical address of allocated memory region
* @ctx: PAS metadata context, to be released by caller
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index 97fd24c178f8..c92d26b73e6f 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -194,14 +194,17 @@ struct ocmem *of_get_ocmem(struct device *dev)
devnode = of_parse_phandle(dev->of_node, "sram", 0);
if (!devnode || !devnode->parent) {
dev_err(dev, "Cannot look up sram phandle\n");
+ of_node_put(devnode);
return ERR_PTR(-ENODEV);
}
pdev = of_find_device_by_node(devnode->parent);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", devnode->name);
+ of_node_put(devnode);
return ERR_PTR(-EPROBE_DEFER);
}
+ of_node_put(devnode);
ocmem = platform_get_drvdata(pdev);
if (!ocmem) {
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index a59bb34e5eba..18c856056475 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -399,8 +399,10 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
continue;
ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
goto unroll;
+ }
}
if (!count)
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 05fff8691ee3..092f6ab09acf 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -23,8 +23,8 @@
/**
* struct rpmhpd - top level RPMh power domain resource data structure
* @dev: rpmh power domain controller device
- * @pd: generic_pm_domain corrresponding to the power domain
- * @parent: generic_pm_domain corrresponding to the parent's power domain
+ * @pd: generic_pm_domain corresponding to the power domain
+ * @parent: generic_pm_domain corresponding to the parent's power domain
* @peer: A peer power domain in case Active only Voting is
* supported
* @active_only: True if it represents an Active only peer
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 3b5b91621532..5803038c744e 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -453,6 +453,7 @@ static const struct rpmpd_desc qcm2290_desc = {
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc },
+ { .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 30dda1af63c8..413f9f4ae9cd 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -234,6 +234,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
{ .compatible = "qcom,rpm-ipq6018" },
{ .compatible = "qcom,rpm-msm8226" },
+ { .compatible = "qcom,rpm-msm8909" },
{ .compatible = "qcom,rpm-msm8916" },
{ .compatible = "qcom,rpm-msm8936" },
{ .compatible = "qcom,rpm-msm8953" },
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 59dbf4b61e6c..d9c28a8a7cbf 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -119,6 +119,9 @@ struct smp2p_entry {
* @out: pointer to the outbound smem item
* @smem_items: ids of the two smem items
* @valid_entries: already scanned inbound entries
+ * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
+ * @ssr_ack: current cached state of the local ack bit
+ * @negotiation_done: whether negotiation has finished
* @local_pid: processor id of the inbound edge
* @remote_pid: processor id of the outbound edge
* @ipc_regmap: regmap for the outbound ipc
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index cee579a267a6..4554fb8655d3 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -328,10 +328,12 @@ static const struct soc_id soc_id[] = {
{ 455, "QRB5165" },
{ 457, "SM8450" },
{ 459, "SM7225" },
- { 460, "SA8540P" },
+ { 460, "SA8295P" },
+ { 461, "SA8540P" },
{ 480, "SM8450" },
{ 482, "SM8450" },
{ 487, "SC7280" },
+ { 495, "SC7180P" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index f831420b7fd4..484b42b7454e 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -74,6 +74,18 @@ static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = {
[SPM_REG_SEQ_ENTRY] = 0x400,
};
+/* SPM register data for 8909 */
+static const struct spm_reg_data spm_reg_8909_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x76, 0x76, 0x0B, 0x94, 0x5B, 0x80,
+ 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
/* SPM register data for 8916 */
static const struct spm_reg_data spm_reg_8916_cpu = {
.reg_offset = spm_reg_offset_v3_0,
@@ -195,6 +207,8 @@ static const struct of_device_id spm_match_table[] = {
.data = &spm_reg_660_silver_l2 },
{ .compatible = "qcom,msm8226-saw2-v2.1-cpu",
.data = &spm_reg_8226_cpu },
+ { .compatible = "qcom,msm8909-saw2-v3.0-cpu",
+ .data = &spm_reg_8909_cpu },
{ .compatible = "qcom,msm8916-saw2-v3.0-cpu",
.data = &spm_reg_8916_cpu },
{ .compatible = "qcom,msm8974-saw2-v2.1-cpu",
diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c
index fdfc857df334..04f1bc322ae7 100644
--- a/drivers/soc/renesas/r8a779a0-sysc.c
+++ b/drivers/soc/renesas/r8a779a0-sysc.c
@@ -57,11 +57,11 @@ static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = {
{ "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
{ "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR },
{ "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR },
- { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
- { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
- { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
- { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
- { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
+ { "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR },
+ { "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR },
+ { "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR },
+ { "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR },
+ { "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR },
{ "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR },
{ "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 },
{ "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 },
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h
index fe2d98254754..388cfa8f8f9f 100644
--- a/drivers/soc/renesas/rcar-gen4-sysc.h
+++ b/drivers/soc/renesas/rcar-gen4-sysc.h
@@ -25,8 +25,8 @@
struct rcar_gen4_sysc_area {
const char *name;
u8 pdr; /* PDRn */
- int parent; /* -1 if none */
- unsigned int flags; /* See PD_* */
+ s8 parent; /* -1 if none */
+ u8 flags; /* See PD_* */
};
/*
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 8d861c1cfdf7..266c599a0a9b 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -31,8 +31,8 @@ struct rcar_sysc_area {
u16 chan_offs; /* Offset of PWRSR register for this area */
u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */
u8 isr_bit; /* Bit in SYSCI*R */
- int parent; /* -1 if none */
- unsigned int flags; /* See PD_* */
+ s8 parent; /* -1 if none */
+ u8 flags; /* See PD_* */
};
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index 1fef0e711056..8aecbc9b1976 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -6,6 +6,7 @@
config SUNXI_MBUS
bool
default ARCH_SUNXI
+ depends on ARM || ARM64
help
Say y to enable the fixups needed to support the Allwinner
MBUS DMA quirks.
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 0e4ba0f89533..6882c86b3ce5 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -338,6 +338,7 @@ static const struct of_device_id pruss_of_match[] = {
{ .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
{ .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
{ .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, },
{},
};
MODULE_DEVICE_TABLE(of, pruss_of_match);
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 0076d467ff6b..343c58ed5896 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -688,7 +688,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
&m3_ipc->sd_fw_name);
if (ret) {
dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
- };
+ }
/*
* Wait for firmware loading completion in a thread so we
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3b1044ebc400..35ce57878b27 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -183,7 +183,7 @@ config SPI_BCM63XX
config SPI_BCM63XX_HSSPI
tristate "Broadcom BCM63XX HS SPI controller driver"
- depends on BCM63XX || BMIPS_GENERIC || ARCH_BCM_63XX || COMPILE_TEST
+ depends on BCM63XX || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST
help
This enables support for the High Speed SPI controller present on
newer Broadcom BCM63XX SoCs.
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a452748c69b2..7172cd1792df 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1099,8 +1099,8 @@ config SERIAL_TIMBERDALE
config SERIAL_BCM63XX
tristate "Broadcom BCM63xx/BCM33xx UART support"
select SERIAL_CORE
- depends on ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
- default ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
+ depends on ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ default ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC
help
This enables the driver for the onchip UART core found on
the following chipsets:
diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h
index bd4c3086a2da..bab85d9ba8cd 100644
--- a/include/dt-bindings/clock/tegra234-clock.h
+++ b/include/dt-bindings/clock/tegra234-clock.h
@@ -164,10 +164,111 @@
#define TEGRA234_CLK_PEX1_C5_CORE 225U
/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
#define TEGRA234_CLK_PLLC4 237U
+/** @brief RX clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_INPUT 248U
+/** @brief RX clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_INPUT 249U
+/** @brief RX clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_INPUT 250U
+/** @brief RX clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_INPUT 251U
/** @brief 32K input clock provided by PMIC */
#define TEGRA234_CLK_CLK_32K 289U
+/** @brief Monitored branch of MBGE0 RX input clock */
+#define TEGRA234_CLK_MGBE0_RX_INPUT_M 357U
+/** @brief Monitored branch of MBGE1 RX input clock */
+#define TEGRA234_CLK_MGBE1_RX_INPUT_M 358U
+/** @brief Monitored branch of MBGE2 RX input clock */
+#define TEGRA234_CLK_MGBE2_RX_INPUT_M 359U
+/** @brief Monitored branch of MBGE3 RX input clock */
+#define TEGRA234_CLK_MGBE3_RX_INPUT_M 360U
+/** @brief Monitored branch of MGBE0 RX PCS mux output */
+#define TEGRA234_CLK_MGBE0_RX_PCS_M 361U
+/** @brief Monitored branch of MGBE1 RX PCS mux output */
+#define TEGRA234_CLK_MGBE1_RX_PCS_M 362U
+/** @brief Monitored branch of MGBE2 RX PCS mux output */
+#define TEGRA234_CLK_MGBE2_RX_PCS_M 363U
+/** @brief Monitored branch of MGBE3 RX PCS mux output */
+#define TEGRA234_CLK_MGBE3_RX_PCS_M 364U
+/** @brief RX PCS clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_PCS_INPUT 369U
+/** @brief RX PCS clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_PCS_INPUT 370U
+/** @brief RX PCS clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_PCS_INPUT 371U
+/** @brief RX PCS clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_PCS_INPUT 372U
+/** @brief output of mux controlled by GBE_UPHY_MGBE0_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE0_RX_PCS 373U
+/** @brief GBE_UPHY_MGBE0_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX 374U
+/** @brief GBE_UPHY_MGBE0_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX_PCS 375U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE0_MAC_DIVIDER 376U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MAC 377U
+/** @brief GBE_UPHY_MGBE0_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MACSEC 378U
+/** @brief GBE_UPHY_MGBE0_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE0_EEE_PCS 379U
+/** @brief GBE_UPHY_MGBE0_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE0_APP 380U
+/** @brief GBE_UPHY_MGBE0_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_PTP_REF 381U
+/** @brief output of mux controlled by GBE_UPHY_MGBE1_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE1_RX_PCS 382U
+/** @brief GBE_UPHY_MGBE1_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX 383U
+/** @brief GBE_UPHY_MGBE1_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX_PCS 384U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE1_MAC_DIVIDER 385U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE1_MAC 386U
+/** @brief GBE_UPHY_MGBE1_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE1_EEE_PCS 388U
+/** @brief GBE_UPHY_MGBE1_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE1_APP 389U
+/** @brief GBE_UPHY_MGBE1_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_PTP_REF 390U
+/** @brief output of mux controlled by GBE_UPHY_MGBE2_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE2_RX_PCS 391U
+/** @brief GBE_UPHY_MGBE2_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX 392U
+/** @brief GBE_UPHY_MGBE2_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX_PCS 393U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE2_MAC_DIVIDER 394U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE2_MAC 395U
+/** @brief GBE_UPHY_MGBE2_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE2_EEE_PCS 397U
+/** @brief GBE_UPHY_MGBE2_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE2_APP 398U
+/** @brief GBE_UPHY_MGBE2_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_PTP_REF 399U
+/** @brief output of mux controlled by GBE_UPHY_MGBE3_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE3_RX_PCS 400U
+/** @brief GBE_UPHY_MGBE3_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX 401U
+/** @brief GBE_UPHY_MGBE3_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX_PCS 402U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE3_MAC_DIVIDER 403U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MAC 404U
+/** @brief GBE_UPHY_MGBE3_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MACSEC 405U
+/** @brief GBE_UPHY_MGBE3_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE3_EEE_PCS 406U
+/** @brief GBE_UPHY_MGBE3_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE3_APP 407U
+/** @brief GBE_UPHY_MGBE3_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_PTP_REF 408U
/** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */
#define TEGRA234_CLK_AZA_2XBIT 457U
/** @brief aza_2xbitclk / 2 (aza_bitclk) */
#define TEGRA234_CLK_AZA_BIT 458U
+
#endif
diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h
index e3b0e9da295d..8b0ddcb715ff 100644
--- a/include/dt-bindings/memory/tegra234-mc.h
+++ b/include/dt-bindings/memory/tegra234-mc.h
@@ -11,11 +11,16 @@
/* NISO0 stream IDs */
#define TEGRA234_SID_APE 0x02
#define TEGRA234_SID_HDA 0x03
+#define TEGRA234_SID_GPCDMA 0x04
+#define TEGRA234_SID_MGBE 0x06
#define TEGRA234_SID_PCIE0 0x12
#define TEGRA234_SID_PCIE4 0x13
#define TEGRA234_SID_PCIE5 0x14
#define TEGRA234_SID_PCIE6 0x15
#define TEGRA234_SID_PCIE9 0x1f
+#define TEGRA234_SID_MGBE_VF1 0x49
+#define TEGRA234_SID_MGBE_VF2 0x4a
+#define TEGRA234_SID_MGBE_VF3 0x4b
/* NISO1 stream IDs */
#define TEGRA234_SID_SDMMC4 0x02
@@ -61,8 +66,24 @@
#define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48
/* PCIE7r1 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49
+/* MGBE0 read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEARD 0x58
+/* MGBEB read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBRD 0x59
+/* MGBEC read client */
+#define TEGRA234_MEMORY_CLIENT_MGBECRD 0x5a
+/* MGBED read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDRD 0x5b
+/* MGBE0 write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEAWR 0x5c
+/* MGBEB write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBWR 0x5f
+/* MGBEC write client */
+#define TEGRA234_MEMORY_CLIENT_MGBECWR 0x61
/* sdmmcd memory read client */
#define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63
+/* MGBED write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDWR 0x65
/* sdmmcd memory write client */
#define TEGRA234_MEMORY_CLIENT_SDMMCWAB 0x67
/* BPMP read client */
diff --git a/include/dt-bindings/power/mt6795-power.h b/include/dt-bindings/power/mt6795-power.h
new file mode 100644
index 000000000000..b0fc26cb1da4
--- /dev/null
+++ b/include/dt-bindings/power/mt6795-power.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_POWER_MT6795_POWER_H
+#define _DT_BINDINGS_POWER_MT6795_POWER_H
+
+#define MT6795_POWER_DOMAIN_MM 0
+#define MT6795_POWER_DOMAIN_VDEC 1
+#define MT6795_POWER_DOMAIN_VENC 2
+#define MT6795_POWER_DOMAIN_ISP 3
+#define MT6795_POWER_DOMAIN_MJC 4
+#define MT6795_POWER_DOMAIN_AUDIO 5
+#define MT6795_POWER_DOMAIN_MFG_ASYNC 6
+#define MT6795_POWER_DOMAIN_MFG_2D 7
+#define MT6795_POWER_DOMAIN_MFG 8
+#define MT6795_POWER_DOMAIN_MODEM 9
+
+#endif /* _DT_BINDINGS_POWER_MT6795_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 6cce5b7aa940..d81de63ae31c 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -187,6 +187,13 @@
#define MSM8916_VDDMX 3
#define MSM8916_VDDMX_AO 4
+/* MSM8909 Power Domain Indexes */
+#define MSM8909_VDDCX MSM8916_VDDCX
+#define MSM8909_VDDCX_AO MSM8916_VDDCX_AO
+#define MSM8909_VDDCX_VFC MSM8916_VDDCX_VFC
+#define MSM8909_VDDMX MSM8916_VDDMX
+#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO
+
/* MSM8953 Power Domain Indexes */
#define MSM8953_VDDMD 0
#define MSM8953_VDDMD_AO 1
diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h
index f610eee9bce8..df1d4dd8dcf3 100644
--- a/include/dt-bindings/power/tegra234-powergate.h
+++ b/include/dt-bindings/power/tegra234-powergate.h
@@ -18,5 +18,6 @@
#define TEGRA234_POWER_DOMAIN_MGBEA 17U
#define TEGRA234_POWER_DOMAIN_MGBEB 18U
#define TEGRA234_POWER_DOMAIN_MGBEC 19U
+#define TEGRA234_POWER_DOMAIN_MGBED 20U
#endif
diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h
index 547ca3b60caa..bd58a05f1d94 100644
--- a/include/dt-bindings/reset/tegra234-reset.h
+++ b/include/dt-bindings/reset/tegra234-reset.h
@@ -15,6 +15,7 @@
#define TEGRA234_RESET_PEX1_COMMON_APB 13U
#define TEGRA234_RESET_PEX2_CORE_7 14U
#define TEGRA234_RESET_PEX2_CORE_7_APB 15U
+#define TEGRA234_RESET_GPCDMA 18U
#define TEGRA234_RESET_HDA 20U
#define TEGRA234_RESET_HDACODEC 21U
#define TEGRA234_RESET_I2C1 24U
@@ -29,6 +30,12 @@
#define TEGRA234_RESET_I2C7 33U
#define TEGRA234_RESET_I2C8 34U
#define TEGRA234_RESET_I2C9 35U
+#define TEGRA234_RESET_MGBE0_PCS 45U
+#define TEGRA234_RESET_MGBE0_MAC 46U
+#define TEGRA234_RESET_MGBE1_PCS 49U
+#define TEGRA234_RESET_MGBE1_MAC 50U
+#define TEGRA234_RESET_MGBE2_PCS 53U
+#define TEGRA234_RESET_MGBE2_MAC 54U
#define TEGRA234_RESET_PEX2_CORE_10 56U
#define TEGRA234_RESET_PEX2_CORE_10_APB 57U
#define TEGRA234_RESET_PEX2_COMMON_APB 58U
@@ -43,6 +50,8 @@
#define TEGRA234_RESET_QSPI0 76U
#define TEGRA234_RESET_QSPI1 77U
#define TEGRA234_RESET_SDMMC4 85U
+#define TEGRA234_RESET_MGBE3_PCS 87U
+#define TEGRA234_RESET_MGBE3_MAC 88U
#define TEGRA234_RESET_UARTA 100U
#define TEGRA234_RESET_PEX0_CORE_0 116U
#define TEGRA234_RESET_PEX0_CORE_1 117U
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
index ed37dc40e82a..f70a810c55f7 100644
--- a/include/linux/mfd/bcm2835-pm.h
+++ b/include/linux/mfd/bcm2835-pm.h
@@ -9,6 +9,7 @@ struct bcm2835_pm {
struct device *dev;
void __iomem *base;
void __iomem *asb;
+ void __iomem *rpivid_asb;
};
#endif /* BCM2835_MFD_PM_H */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 704111f63993..7f4f9df1b20f 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -561,6 +561,116 @@ struct scmi_voltage_proto_ops {
};
/**
+ * struct scmi_powercap_info - Describe one available Powercap domain
+ *
+ * @id: Domain ID as advertised by the platform.
+ * @notify_powercap_cap_change: CAP change notification support.
+ * @notify_powercap_measurement_change: MEASUREMENTS change notifications
+ * support.
+ * @async_powercap_cap_set: Asynchronous CAP set support.
+ * @powercap_cap_config: CAP configuration support.
+ * @powercap_monitoring: Monitoring (measurements) support.
+ * @powercap_pai_config: PAI configuration support.
+ * @powercap_scale_mw: Domain reports power data in milliwatt units.
+ * @powercap_scale_uw: Domain reports power data in microwatt units.
+ * Note that, when both @powercap_scale_mw and
+ * @powercap_scale_uw are set to false, the domain
+ * reports power data on an abstract linear scale.
+ * @name: name assigned to the Powercap Domain by the platform.
+ * @min_pai: Minimum configurable PAI.
+ * @max_pai: Maximum configurable PAI.
+ * @pai_step: Step size between two consecutive PAI values.
+ * @min_power_cap: Minimum configurable CAP.
+ * @max_power_cap: Maximum configurable CAP.
+ * @power_cap_step: Step size between two consecutive CAP values.
+ * @sustainable_power: Maximum sustainable power consumption for this domain
+ * under normal conditions.
+ * @accuracy: The accuracy with which the power is measured and reported in
+ * integral multiples of 0.001 percent.
+ * @parent_id: Identifier of the containing parent power capping domain, or the
+ * value 0xFFFFFFFF if this powercap domain is a root domain not
+ * contained in any other domain.
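+ * @fastchannels: Whether the domain supports fastchannels.
+ * @fc_info: Fastchannel info.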
+ */
+struct scmi_powercap_info {
+ unsigned int id;
+ bool notify_powercap_cap_change;
+ bool notify_powercap_measurement_change;
+ bool async_powercap_cap_set;
+ bool powercap_cap_config;
+ bool powercap_monitoring;
+ bool powercap_pai_config;
+ bool powercap_scale_mw;
+ bool powercap_scale_uw;
+ bool fastchannels;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int min_pai;
+ unsigned int max_pai;
+ unsigned int pai_step;
+ unsigned int min_power_cap;
+ unsigned int max_power_cap;
+ unsigned int power_cap_step;
+ unsigned int sustainable_power;
+ unsigned int accuracy;
+#define SCMI_POWERCAP_ROOT_ZONE_ID 0xFFFFFFFFUL
+ unsigned int parent_id;
+ struct scmi_fc_info *fc_info;
+};
+
+/**
+ * struct scmi_powercap_proto_ops - represents the various operations provided
+ * by SCMI Powercap Protocol
+ *
+ * @num_domains_get: get the count of powercap domains provided by SCMI.
+ * @info_get: get the information for the specified domain.
+ * @cap_get: get the current CAP value for the specified domain.
+ * @cap_set: set the CAP value for the specified domain to the provided value;
+ * if the domain supports setting the CAP with an asynchronous command,
+ * this request will trigger an asynchronous transfer, but, if
+ * @ignore_dresp is set to true, this call will still return
+ * immediately without waiting for the related delayed response.
+ * @pai_get: get the current PAI value for the specified domain.
+ * @pai_set: set the PAI value for the specified domain to the provided value.
+ * @measurements_get: retrieve the current average power measurements for the
+ * specified domain and the related PAI upon which it is
+ * calculated.
+ * @measurements_threshold_set: set the desired low and high power thresholds
+ * to be used when registering for notification
+ * of type POWERCAP_MEASUREMENTS_NOTIFY with this
+ * powercap domain.
+ * Note that this must be called at least once
+ * before registering any callback with the usual
+ * @scmi_notify_ops; moreover, in case this method
+ * is called with measurement notifications already
+ * enabled it will also trigger, transparently, a
+ * proper update of the power thresholds configured
+ * in the SCMI backend server.
+ * @measurements_threshold_get: get the currently configured low and high power
+ * thresholds used when registering callbacks for
+ * notification POWERCAP_MEASUREMENTS_NOTIFY.
+ */
+struct scmi_powercap_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_powercap_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*cap_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *power_cap);
+ int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 power_cap, bool ignore_dresp);
+ int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *pai);
+ int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 pai);
+ int (*measurements_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power, u32 *pai);
+ int (*measurements_threshold_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high);
+ int (*measurements_threshold_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high);
+};
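A minimal sketch of how an SCMI driver might bind and use these operations, assuming only the declarations in this header and the usual scmi_driver registration scaffolding; the probe function, its policy of clamping each configurable domain to its minimum CAP, and the NULL check on info_get() are hypothetical and for illustration only.

    static int example_powercap_probe(struct scmi_device *sdev)
    {
            const struct scmi_powercap_proto_ops *powercap_ops;
            struct scmi_protocol_handle *ph;
            int num, i;

            /* Acquire the Powercap protocol; resources are devres-managed. */
            powercap_ops = sdev->handle->devm_protocol_get(sdev,
                                            SCMI_PROTOCOL_POWERCAP, &ph);
            if (IS_ERR(powercap_ops))
                    return PTR_ERR(powercap_ops);

            num = powercap_ops->num_domains_get(ph);
            for (i = 0; i < num; i++) {
                    const struct scmi_powercap_info *info;

                    info = powercap_ops->info_get(ph, i);
                    if (!info)
                            continue;

                    /* Illustrative policy: clamp configurable domains to their
                     * minimum CAP, synchronously (ignore_dresp == false). */
                    if (info->powercap_cap_config)
                            powercap_ops->cap_set(ph, i, info->min_power_cap,
                                                  false);
            }

            return 0;
    }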
+
+/**
* struct scmi_notify_ops - represents notifications' operations provided by
* SCMI core
* @devm_event_notifier_register: Managed registration of a notifier_block for
@@ -624,6 +734,9 @@ struct scmi_notify_ops {
*
* @dev: pointer to the SCMI device
* @version: pointer to the structure containing SCMI version information
+ * @devm_protocol_acquire: devres managed method to get hold of a protocol,
+ * causing its initialization and related resource
+ * accounting
* @devm_protocol_get: devres managed method to acquire a protocol and get specific
* operations and a dedicated protocol handler
* @devm_protocol_put: devres managed method to release a protocol
@@ -642,6 +755,8 @@ struct scmi_handle {
struct device *dev;
struct scmi_revision_info *version;
+ int __must_check (*devm_protocol_acquire)(struct scmi_device *sdev,
+ u8 proto);
const void __must_check *
(*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
struct scmi_protocol_handle **ph);
@@ -661,6 +776,7 @@ enum scmi_std_protocol {
SCMI_PROTOCOL_SENSOR = 0x15,
SCMI_PROTOCOL_RESET = 0x16,
SCMI_PROTOCOL_VOLTAGE = 0x17,
+ SCMI_PROTOCOL_POWERCAP = 0x18,
};
enum scmi_system_events {
@@ -762,6 +878,8 @@ enum scmi_notification_events {
SCMI_EVENT_RESET_ISSUED = 0x0,
SCMI_EVENT_BASE_ERROR_EVENT = 0x0,
SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0,
+ SCMI_EVENT_POWERCAP_CAP_CHANGED = 0x0,
+ SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 0x1,
};
struct scmi_power_state_changed_report {
@@ -781,8 +899,10 @@ struct scmi_clock_rate_notif_report {
struct scmi_system_power_state_notifier_report {
ktime_t timestamp;
unsigned int agent_id;
+#define SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(flags) ((flags) & BIT(0))
unsigned int flags;
unsigned int system_state;
+ unsigned int timeout;
};
struct scmi_perf_limits_report {
@@ -830,4 +950,18 @@ struct scmi_base_error_report {
unsigned long long reports[];
};
+struct scmi_powercap_cap_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_cap;
+ unsigned int pai;
+};
+
+struct scmi_powercap_meas_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power;
+};
#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h
index 6fe4ffbde290..a0f4f51a3b45 100644
--- a/include/linux/soc/mediatek/mtk-mutex.h
+++ b/include/linux/soc/mediatek/mtk-mutex.h
@@ -10,11 +10,33 @@ struct regmap;
struct device;
struct mtk_mutex;
+enum mtk_mutex_mod_index {
+ /* MDP table index */
+ MUTEX_MOD_IDX_MDP_RDMA0,
+ MUTEX_MOD_IDX_MDP_RSZ0,
+ MUTEX_MOD_IDX_MDP_RSZ1,
+ MUTEX_MOD_IDX_MDP_TDSHP0,
+ MUTEX_MOD_IDX_MDP_WROT0,
+ MUTEX_MOD_IDX_MDP_WDMA,
+ MUTEX_MOD_IDX_MDP_AAL0,
+ MUTEX_MOD_IDX_MDP_CCORR0,
+
+ MUTEX_MOD_IDX_MAX /* ALWAYS keep at the end */
+};
+
+enum mtk_mutex_sof_index {
+ MUTEX_SOF_IDX_SINGLE_MODE,
+
+ MUTEX_SOF_IDX_MAX /* ALWAYS keep at the end */
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev);
int mtk_mutex_prepare(struct mtk_mutex *mutex);
void mtk_mutex_add_comp(struct mtk_mutex *mutex,
enum mtk_ddp_comp_id id);
void mtk_mutex_enable(struct mtk_mutex *mutex);
+int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex,
+ void *pkt);
void mtk_mutex_disable(struct mtk_mutex *mutex);
void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
enum mtk_ddp_comp_id id);
@@ -22,5 +44,10 @@ void mtk_mutex_unprepare(struct mtk_mutex *mutex);
void mtk_mutex_put(struct mtk_mutex *mutex);
void mtk_mutex_acquire(struct mtk_mutex *mutex);
void mtk_mutex_release(struct mtk_mutex *mutex);
+int mtk_mutex_write_mod(struct mtk_mutex *mutex,
+ enum mtk_mutex_mod_index idx,
+ bool clear);
+int mtk_mutex_write_sof(struct mtk_mutex *mutex,
+ enum mtk_mutex_sof_index idx);
#endif /* MTK_MUTEX_H */
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index cee4b2b64ae4..65016a767b7a 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -7,6 +7,31 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(scmi_fc_call,
+ TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2),
+ TP_ARGS(protocol_id, msg_id, res_id, val1, val2),
+
+ TP_STRUCT__entry(
+ __field(u8, protocol_id)
+ __field(u8, msg_id)
+ __field(u32, res_id)
+ __field(u32, val1)
+ __field(u32, val2)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol_id = protocol_id;
+ __entry->msg_id = msg_id;
+ __entry->res_id = res_id;
+ __entry->val1 = val1;
+ __entry->val2 = val2;
+ ),
+
+ TP_printk("[0x%02X]:[0x%02X]:[%08X]:%u:%u",
+ __entry->protocol_id, __entry->msg_id,
+ __entry->res_id, __entry->val1, __entry->val2)
+);
+
TRACE_EVENT(scmi_xfer_begin,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
bool poll),
@@ -112,6 +137,37 @@ TRACE_EVENT(scmi_rx_done,
__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
__entry->seq, __entry->msg_type)
);
+
+TRACE_EVENT(scmi_msg_dump,
+ TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq,
+ int status, void *buf, size_t len),
+ TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len),
+
+ TP_STRUCT__entry(
+ __field(u8, protocol_id)
+ __field(u8, msg_id)
+ __array(char, tag, 5)
+ __field(u16, seq)
+ __field(int, status)
+ __field(size_t, len)
+ __dynamic_array(unsigned char, cmd, len)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol_id = protocol_id;
+ __entry->msg_id = msg_id;
+ strscpy(__entry->tag, tag, 5);
+ __entry->seq = seq;
+ __entry->status = status;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(cmd), buf, __entry->len);
+ ),
+
+ TP_printk("pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
+ __entry->protocol_id, __entry->tag, __entry->msg_id,
+ __entry->seq, __entry->status,
+ __print_hex_str(__get_dynamic_array(cmd), __entry->len))
+);
#endif /* _TRACE_SCMI_H */
/* This part must be outside protection */