-rw-r--r--Documentation/ABI/stable/sysfs-bus-nvmem19
-rw-r--r--Documentation/ABI/testing/sysfs-bus-thunderbolt2
-rw-r--r--Documentation/ABI/testing/sysfs-driver-altera-cvp8
-rw-r--r--Documentation/admin-guide/devices.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt4
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt24
-rw-r--r--Documentation/devicetree/bindings/fpga/altera-passive-serial.txt29
-rw-r--r--Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt36
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/devicetree/bindings/xilinx.txt2
-rw-r--r--Documentation/trace/stm.txt13
-rw-r--r--MAINTAINERS4
-rw-r--r--arch/arm/boot/dts/imx6q-evi.dts16
-rw-r--r--arch/x86/include/asm/mshyperv.h2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c12
-rw-r--r--block/genhd.c18
-rw-r--r--drivers/android/Kconfig14
-rw-r--r--drivers/android/Makefile3
-rw-r--r--drivers/android/binder.c3760
-rw-r--r--drivers/android/binder_alloc.c1009
-rw-r--r--drivers/android/binder_alloc.h187
-rw-r--r--drivers/android/binder_alloc_selftest.c310
-rw-r--r--drivers/android/binder_trace.h96
-rw-r--r--drivers/auxdisplay/panel.c6
-rw-r--r--drivers/char/applicom.c2
-rw-r--r--drivers/char/mwave/smapi.c48
-rw-r--r--drivers/char/ppdev.c3
-rw-r--r--drivers/char/tlclk.c2
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c39
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.h13
-rw-r--r--drivers/extcon/Kconfig7
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/devres.c50
-rw-r--r--drivers/extcon/extcon-intel-int3496.c2
-rw-r--r--drivers/extcon/extcon-max77693.c5
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c417
-rw-r--r--drivers/extcon/extcon.c279
-rw-r--r--drivers/firmware/google/vpd.c10
-rw-r--r--drivers/fmc/Makefile1
-rw-r--r--drivers/fmc/fmc-chardev.c3
-rw-r--r--drivers/fmc/fmc-core.c95
-rw-r--r--drivers/fmc/fmc-debug.c173
-rw-r--r--drivers/fmc/fmc-dump.c41
-rw-r--r--drivers/fmc/fmc-match.c2
-rw-r--r--drivers/fmc/fmc-private.h9
-rw-r--r--drivers/fmc/fmc-sdb.c119
-rw-r--r--drivers/fmc/fmc-trivial.c20
-rw-r--r--drivers/fmc/fmc-write-eeprom.c8
-rw-r--r--drivers/fmc/fru-parse.c3
-rw-r--r--drivers/fpga/Kconfig20
-rw-r--r--drivers/fpga/Makefile2
-rw-r--r--drivers/fpga/altera-cvp.c500
-rw-r--r--drivers/fpga/altera-hps2fpga.c12
-rw-r--r--drivers/fpga/altera-ps-spi.c308
-rw-r--r--drivers/fpga/fpga-region.c4
-rw-r--r--drivers/fsi/fsi-core.c4
-rw-r--r--drivers/fsi/fsi-scom.c10
-rw-r--r--drivers/hv/channel.c14
-rw-r--r--drivers/hv/channel_mgmt.c29
-rw-r--r--drivers/hv/hv_balloon.c12
-rw-r--r--drivers/hv/hv_kvp.c2
-rw-r--r--drivers/hv/ring_buffer.c169
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/hwtracing/coresight/Kconfig10
-rw-r--r--drivers/hwtracing/coresight/Makefile2
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c (renamed from drivers/hwtracing/coresight/coresight-replicator-qcom.c)34
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c68
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c24
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h39
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c42
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c108
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h85
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c7
-rw-r--r--drivers/hwtracing/coresight/coresight.c8
-rw-r--r--drivers/hwtracing/intel_th/core.c359
-rw-r--r--drivers/hwtracing/intel_th/gth.c40
-rw-r--r--drivers/hwtracing/intel_th/gth.h5
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h104
-rw-r--r--drivers/hwtracing/intel_th/msu.c12
-rw-r--r--drivers/hwtracing/intel_th/pci.c67
-rw-r--r--drivers/hwtracing/intel_th/pti.c115
-rw-r--r--drivers/hwtracing/intel_th/pti.h8
-rw-r--r--drivers/hwtracing/stm/core.c2
-rw-r--r--drivers/mcb/mcb-lpc.c15
-rw-r--r--drivers/mcb/mcb-parse.c6
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/apds9802als.c4
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/aspeed-lpc-snoop.c34
-rw-r--r--drivers/misc/bh1770glc.c2
-rw-r--r--drivers/misc/ds1682.c2
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c24
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c126
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/isl29020.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/lkdtm.h30
-rw-r--r--drivers/misc/lkdtm_bugs.c134
-rw-r--r--drivers/misc/lkdtm_core.c28
-rw-r--r--drivers/misc/lkdtm_refcount.c400
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/hw-me.c45
-rw-r--r--drivers/misc/mei/hw-me.h39
-rw-r--r--drivers/misc/mei/pci-me.c109
-rw-r--r--drivers/misc/pch_phub.c4
-rw-r--r--drivers/misc/sram.c12
-rw-r--r--drivers/misc/ti-st/st_kim.c2
-rw-r--r--drivers/misc/tifm_7xx1.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mux/Makefile5
-rw-r--r--drivers/mux/adg792a.c (renamed from drivers/mux/mux-adg792a.c)0
-rw-r--r--drivers/mux/core.c (renamed from drivers/mux/mux-core.c)14
-rw-r--r--drivers/mux/gpio.c (renamed from drivers/mux/mux-gpio.c)0
-rw-r--r--drivers/mux/mmio.c (renamed from drivers/mux/mux-mmio.c)0
-rw-r--r--drivers/nvmem/core.c43
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c2
-rw-r--r--drivers/parport/daisy.c2
-rw-r--r--drivers/parport/parport_atari.c2
-rw-r--r--drivers/parport/parport_ax88796.c6
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c24
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c14
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c10
-rw-r--r--drivers/spmi/spmi-pmic-arb.c837
-rw-r--r--drivers/spmi/spmi.c14
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/switch.c20
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c6
-rw-r--r--drivers/w1/masters/ds1wm.c108
-rw-r--r--drivers/w1/masters/ds2482.c12
-rw-r--r--drivers/w1/masters/ds2490.c2
-rw-r--r--drivers/w1/slaves/Kconfig8
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2438.c9
-rw-r--r--drivers/w1/slaves/w1_ds2805.c313
-rw-r--r--drivers/w1/slaves/w1_therm.c164
-rw-r--r--drivers/w1/w1.c22
-rw-r--r--fs/char_dev.c58
-rw-r--r--fs/proc/devices.c8
-rw-r--r--include/linux/bitrev.h19
-rw-r--r--include/linux/coresight-pmu.h6
-rw-r--r--include/linux/eeprom_93xx46.h3
-rw-r--r--include/linux/extcon.h130
-rw-r--r--include/linux/fmc.h39
-rw-r--r--include/linux/fpga/fpga-mgr.h4
-rw-r--r--include/linux/fs.h10
-rw-r--r--include/linux/hyperv.h67
-rw-r--r--include/linux/mfd/cros_ec_commands.h75
-rw-r--r--include/linux/mfd/ds1wm.h29
-rw-r--r--include/linux/mux/consumer.h2
-rw-r--r--include/linux/nvmem-consumer.h10
-rw-r--r--include/linux/w1.h4
-rw-r--r--include/uapi/linux/android/binder.h16
-rw-r--r--kernel/configs/android-base.config1
-rw-r--r--tools/hv/hv_fcopy_daemon.c32
-rw-r--r--tools/hv/hv_kvp_daemon.c2
-rw-r--r--tools/hv/hv_vss_daemon.c7
-rw-r--r--tools/include/linux/coresight-pmu.h6
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c28
175 files changed, 9518 insertions, 3067 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
new file mode 100644
index 000000000000..5923ab4620c5
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -0,0 +1,19 @@
+What: /sys/bus/nvmem/devices/.../nvmem
+Date: July 2015
+KernelVersion: 4.2
+Contact: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Description:
+		This file allows the user to read/write the raw NVMEM contents.
+		Write permission for this file depends on the nvmem
+		provider configuration.
+
+		For example:
+ hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
+
+ 0000000 0000 0000 0000 0000 0000 0000 0000 0000
+ *
+ 00000a0 db10 2240 0000 e000 0c00 0c00 0000 0c00
+ 0000000 0000 0000 0000 0000 0000 0000 0000 0000
+ ...
+ *
+ 0001000
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index 2a98149943ea..392bef5bd399 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -45,6 +45,8 @@ Contact: thunderbolt-software@lists.01.org
Description: When a device supports Thunderbolt secure connect it will
have this attribute. Writing a 32-byte hex string changes
authorization to use the secure connection method instead.
+ Writing an empty string clears the key and the regular
+ connection method can be used again.
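
A hedged user-space sketch of clearing a previously written key, assuming the attribute is the device's key file and using the hypothetical device name 0-1 in place of the elided ("...") path component:

	#include <fcntl.h>
	#include <unistd.h>

	/* Hedged sketch: writing an empty string reverts the device to
	 * the regular connection method. "0-1" is a hypothetical name;
	 * the real path component is elided in the ABI entry above.
	 */
	int main(void)
	{
		int fd = open("/sys/bus/thunderbolt/devices/0-1/key", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "\n", 1);
		close(fd);
		return 0;
	}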
What: /sys/bus/thunderbolt/devices/.../device
Date: Sep 2017
diff --git a/Documentation/ABI/testing/sysfs-driver-altera-cvp b/Documentation/ABI/testing/sysfs-driver-altera-cvp
new file mode 100644
index 000000000000..8cde64a71edb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-altera-cvp
@@ -0,0 +1,8 @@
+What: /sys/bus/pci/drivers/altera-cvp/chkcfg
+Date: May 2017
+KernelVersion:	4.13
+Contact: Anatolij Gustschin <agust@denx.de>
+Description:
+		Contains either 1 or 0 and controls whether configuration
+		error checking in the altera-cvp driver is turned on or
+		off.
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index 6b71852dadc2..4ec843123cc3 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -3081,3 +3081,8 @@
1 = /dev/osd1 Second OSD Device
...
255 = /dev/osd255 256th OSD Device
+
+ 384-511 char RESERVED FOR DYNAMIC ASSIGNMENT
+		 Character devices that request dynamic allocation of a major
+		 number will take numbers starting at 511 and moving downward,
+		 once the 234-254 range is full.
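
In-kernel, a dynamic major in this range is requested through the usual chardev API; a minimal sketch (the "demo" name is illustrative):

	#include <linux/fs.h>
	#include <linux/module.h>

	/* Minimal sketch: alloc_chrdev_region() picks the major
	 * dynamically; once 234-254 is exhausted, majors come from
	 * 511 downward as described above.
	 */
	static dev_t demo_devt;

	static int __init demo_init(void)
	{
		return alloc_chrdev_region(&demo_devt, 0, 1, "demo");
	}

	static void __exit demo_exit(void)
	{
		unregister_chrdev_region(demo_devt, 1);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");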
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index fcbae6a5e6c1..15ac8e8dcfdf 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -34,8 +34,8 @@ its hardware characteristics.
- Embedded Trace Macrocell (version 4.x):
"arm,coresight-etm4x", "arm,primecell";
- - Qualcomm Configurable Replicator (version 1.x):
- "qcom,coresight-replicator1x", "arm,primecell";
+	- Coresight programmable Replicator:
+ "arm,coresight-dynamic-replicator", "arm,primecell";
- System Trace Macrocell:
"arm,coresight-stm", "arm,primecell"; [1]
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt
new file mode 100644
index 000000000000..8e8625c00dfa
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt
@@ -0,0 +1,24 @@
+ChromeOS EC USB Type-C cable and accessories detection
+
+On ChromeOS systems with USB Type C ports, the ChromeOS Embedded Controller is
+able to detect the state of external accessories such as display adapters
+or USB devices when said accessories are attached or detached.
+
+The node for this device must be under a cros-ec node like google,cros-ec-spi
+or google,cros-ec-i2c.
+
+Required properties:
+- compatible: Should be "google,extcon-usbc-cros-ec".
+- google,usb-port-id: Specifies the USB port ID to use.
+
+Example:
+ cros-ec@0 {
+ compatible = "google,cros-ec-i2c";
+
+ ...
+
+ extcon {
+ compatible = "google,extcon-usbc-cros-ec";
+ google,usb-port-id = <0>;
+ };
+ }
diff --git a/Documentation/devicetree/bindings/fpga/altera-passive-serial.txt b/Documentation/devicetree/bindings/fpga/altera-passive-serial.txt
new file mode 100644
index 000000000000..48478bc07e29
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-passive-serial.txt
@@ -0,0 +1,29 @@
+Altera Passive Serial SPI FPGA Manager
+
+Altera FPGAs support a method of loading the bitstream over what is
+referred to as "passive serial".
+The passive serial link is not technically SPI, and might require extra
+circuits in order to play nicely with other SPI slaves on the same bus.
+
+See https://www.altera.com/literature/hb/cyc/cyc_c51013.pdf
+
+Required properties:
+- compatible: Must be one of the following:
+ "altr,fpga-passive-serial",
+ "altr,fpga-arria10-passive-serial"
+- reg: SPI chip select of the FPGA
+- nconfig-gpios: config pin (referred to as nCONFIG in the manual)
+- nstat-gpios: status pin (referred to as nSTATUS in the manual)
+
+Optional properties:
+- confd-gpios: confd pin (referred to as CONF_DONE in the manual)
+
+Example:
+ fpga: fpga@0 {
+ compatible = "altr,fpga-passive-serial";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ nconfig-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+ nstat-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ confd-gpios = <&gpio4 12 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt b/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
new file mode 100644
index 000000000000..8dcfba926bc7
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
@@ -0,0 +1,36 @@
+Xilinx LogiCORE Partial Reconfig Decoupler Softcore
+
+The Xilinx LogiCORE Partial Reconfig Decoupler manages one or more
+decouplers / fpga bridges.
+The controller can decouple/disable the bridges which prevents signal
+changes from passing through the bridge. The controller can also
+couple / enable the bridges which allows traffic to pass through the
+bridge normally.
+
+The driver supports only MMIO handling. A PR region can have multiple
+PR Decouplers, which can be handled independently or chained via the
+decouple/decouple_status signals.
+
+Required properties:
+- compatible : Should contain "xlnx,pr-decoupler-1.00" followed by
+ "xlnx,pr-decoupler"
+- regs : base address and size for decoupler module
+- clocks : input clock to IP
+- clock-names : should contain "aclk"
+
+Optional properties:
+- bridge-enable : 0 if driver should disable bridge at startup
+ 1 if driver should enable bridge at startup
+ Default is to leave bridge in current state.
+
+See Documentation/devicetree/bindings/fpga/fpga-region.txt for generic bindings.
+
+Example:
+	fpga-bridge@10000045 {
+ compatible = "xlnx,pr-decoupler-1.00",
+ "xlnx-pr-decoupler";
+ regs = <0x10000045 0x10>;
+ clocks = <&clkc 15>;
+ clock-names = "aclk";
+ bridge-enable = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 36b27b1efec6..4e72012928b4 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -175,6 +175,7 @@ kosagi Sutajio Ko-Usagi PTE Ltd.
kyo Kyocera Corporation
lacie LaCie
lantiq Lantiq Semiconductor
+lattice Lattice Semiconductor
lego LEGO Systems A/S
lenovo Lenovo Group Ltd.
lg LG Corporation
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index 299d0923537b..1d11b9002ef8 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -281,6 +281,8 @@
capabilities of the underlying ICAP hardware
differ between different families. May be
'virtex2p', 'virtex4', or 'virtex5'.
+ - compatible : should contain "xlnx,xps-hwicap-1.00.a" or
+ "xlnx,opb-hwicap-1.00.b".
vi) Xilinx Uart 16550
diff --git a/Documentation/trace/stm.txt b/Documentation/trace/stm.txt
index 11cff47eecce..03765750104b 100644
--- a/Documentation/trace/stm.txt
+++ b/Documentation/trace/stm.txt
@@ -83,7 +83,7 @@ by writing the name of the desired stm device there, for example:
$ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
For examples on how to use stm_source interface in the kernel, refer
-to stm_console or stm_heartbeat drivers.
+to stm_console, stm_heartbeat or stm_ftrace drivers.
Each stm_source device will need to assume a master and a range of
channels, depending on how many channels it requires. These are
@@ -107,5 +107,16 @@ console in the STP stream, create a "console" policy entry (see the
beginning of this text on how to do that). When initialized, it will
consume one channel.
+stm_ftrace
+==========
+
+This is another "stm_source" device. Once stm_ftrace has been
+linked with an stm device and the "function" tracer is enabled, the
+function address and parent function address that the Ftrace
+subsystem would store into its ring buffer are exported via the stm
+device at the same time.
+
+Currently only Ftrace "function" tracer is supported.
+
[1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
[2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html
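
Like the console example earlier, linking is done through sysfs; a hedged C sketch, assuming the stm_ftrace source registers under the name "ftrace":

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Hedged sketch: link the stm_ftrace source to an stm device,
	 * the C equivalent of "echo dummy_stm.0 > .../stm_source_link"
	 * above. The "ftrace" source name is an assumption.
	 */
	int main(void)
	{
		const char *stm = "dummy_stm.0";
		int fd = open("/sys/class/stm_source/ftrace/stm_source_link",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, stm, strlen(stm));
		close(fd);
		return 0;
	}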
diff --git a/MAINTAINERS b/MAINTAINERS
index 8a28bad13e13..a90fe134ceb1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5362,10 +5362,11 @@ K: fmc_d.*register
FPGA MANAGER FRAMEWORK
M: Alan Tull <atull@kernel.org>
-R: Moritz Fischer <moritz.fischer@ettus.com>
+R: Moritz Fischer <mdf@kernel.org>
L: linux-fpga@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
+Q: http://patchwork.kernel.org/project/linux-fpga/list/
F: Documentation/fpga/
F: Documentation/devicetree/bindings/fpga/
F: drivers/fpga/
@@ -9484,6 +9485,7 @@ M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
S: Maintained
F: drivers/nvmem/
F: Documentation/devicetree/bindings/nvmem/
+F: Documentation/ABI/stable/sysfs-bus-nvmem
F: include/linux/nvmem-consumer.h
F: include/linux/nvmem-provider.h
diff --git a/arch/arm/boot/dts/imx6q-evi.dts b/arch/arm/boot/dts/imx6q-evi.dts
index 1f0f950dc11e..e0aea782c666 100644
--- a/arch/arm/boot/dts/imx6q-evi.dts
+++ b/arch/arm/boot/dts/imx6q-evi.dts
@@ -94,6 +94,15 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1cs>;
status = "okay";
+
+ fpga: fpga@0 {
+ compatible = "altr,fpga-passive-serial";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ pinctrl-0 = <&pinctrl_fpgaspi>;
+ nconfig-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+ nstat-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ };
};
&ecspi3 {
@@ -319,6 +328,13 @@
>;
};
+ pinctrl_fpgaspi: fpgaspigrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW1__GPIO4_IO09 0x1b0b0
+ MX6QDL_PAD_KEY_ROW2__GPIO4_IO11 0x1b0b0
+ >;
+ };
+
pinctrl_gpminand: gpminandgrp {
fsl,pins = <
MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 2b58c8c1eeaa..58b9291b46d8 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -28,6 +28,8 @@ struct ms_hyperv_info {
u32 features;
u32 misc_features;
u32 hints;
+ u32 max_vp_index;
+ u32 max_lp_index;
};
extern struct ms_hyperv_info ms_hyperv;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 9fc32651c911..fbafd24174af 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -179,9 +179,15 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
- pr_info("HyperV: features 0x%x, hints 0x%x\n",
+ pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
ms_hyperv.features, ms_hyperv.hints);
+ ms_hyperv.max_vp_index = cpuid_eax(HVCPUID_IMPLEMENTATION_LIMITS);
+ ms_hyperv.max_lp_index = cpuid_ebx(HVCPUID_IMPLEMENTATION_LIMITS);
+
+ pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
+ ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);
+
/*
* Extract host information.
*/
@@ -214,7 +220,7 @@ static void __init ms_hyperv_init_platform(void)
rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
lapic_timer_frequency = hv_lapic_frequency;
- pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+ pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
lapic_timer_frequency);
}
@@ -248,7 +254,7 @@ static void __init ms_hyperv_init_platform(void)
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
- .name = "Microsoft HyperV",
+ .name = "Microsoft Hyper-V",
.detect = ms_hyperv_platform,
.init_platform = ms_hyperv_init_platform,
};
diff --git a/block/genhd.c b/block/genhd.c
index 7f520fa25d16..51c1d407d93c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -242,6 +242,7 @@ EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
* Can be deleted altogether. Later.
*
*/
+#define BLKDEV_MAJOR_HASH_SIZE 255
static struct blk_major_name {
struct blk_major_name *next;
int major;
@@ -259,12 +260,11 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
{
struct blk_major_name *dp;
- if (offset < BLKDEV_MAJOR_HASH_SIZE) {
- mutex_lock(&block_class_lock);
- for (dp = major_names[offset]; dp; dp = dp->next)
+ mutex_lock(&block_class_lock);
+ for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
+ if (dp->major == offset)
seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
- mutex_unlock(&block_class_lock);
- }
+ mutex_unlock(&block_class_lock);
}
#endif /* CONFIG_PROC_FS */
@@ -309,6 +309,14 @@ int register_blkdev(unsigned int major, const char *name)
ret = major;
}
+ if (major >= BLKDEV_MAJOR_MAX) {
+ pr_err("register_blkdev: major requested (%d) is greater than the maximum (%d) for %s\n",
+ major, BLKDEV_MAJOR_MAX, name);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
if (p == NULL) {
ret = -ENOMEM;
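
The reworked blkdev_show() walks a hash chain rather than indexing directly; for reference, a hedged sketch of the indexing helper it relies on, assuming the conventional modulo mapping:

	/* Hedged sketch (assumed modulo mapping): folds a major number
	 * into the BLKDEV_MAJOR_HASH_SIZE-entry major_names[] table, so
	 * majors above the table size share chains with smaller ones.
	 */
	static inline int major_to_index(unsigned int major)
	{
		return major % BLKDEV_MAJOR_HASH_SIZE;
	}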
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 832e885349b1..9801d852bd56 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
depends on ANDROID_BINDER_IPC
- default "binder,hwbinder"
+ default "binder,hwbinder,vndbinder"
---help---
Default value for the binder.devices parameter.
@@ -32,7 +32,7 @@ config ANDROID_BINDER_DEVICES
therefore logically separated from the other devices.
config ANDROID_BINDER_IPC_32BIT
- bool
+ bool "Use old (Android 4.4 and earlier) 32-bit binder API"
depends on !64BIT && ANDROID_BINDER_IPC
default y
---help---
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+	  This feature allows the binder selftest to run.
+
+	  The binder selftest exhaustively checks the allocation and
+	  freeing of binder buffers with combinations of various buffer
+	  sizes and alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b072c58..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 831cdd7d197d..d055b3f2a207 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,6 +15,40 @@
*
*/
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ * binder_proc_lock() and binder_proc_unlock() are
+ * used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ * binder_node_lock() and binder_node_unlock() are
+ * used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ * (proc->threads, proc->waiting_threads, proc->nodes)
+ * and all todo lists associated with the binder_proc
+ * (proc->todo, thread->todo, proc->delivered_death and
+ * node->async_todo), as well as thread->transaction_stack
+ * binder_inner_proc_lock() and binder_inner_proc_unlock()
+ * are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock to be held on entry indicate the
+ * required lock in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
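
An illustrative sketch of the documented order, using the helper functions introduced later in this patch:

	/* Illustrative sketch only: take the three locks in the
	 * documented order 1) proc->outer_lock, 2) node->lock,
	 * 3) proc->inner_lock.
	 */
	binder_proc_lock(proc);		/* 1) outer_lock, protects refs */
	binder_node_lock(node);		/* 2) node->lock, protects node fields */
	binder_inner_proc_lock(proc);	/* 3) inner_lock, protects todo lists */
	/* ... operate on refs, node fields and work lists ... */
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);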
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
@@ -35,30 +68,31 @@
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"
-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static int binder_last_id;
+static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -88,8 +122,6 @@ BINDER_DEBUG_ENTRY(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
enum {
BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
@@ -104,17 +136,13 @@ enum {
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
- BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
+ BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
@@ -171,26 +199,27 @@ enum binder_stat_types {
};
struct binder_stats {
- int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_REPLY_SG) + 1];
- int obj_created[BINDER_STAT_COUNT];
- int obj_deleted[BINDER_STAT_COUNT];
+ atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t obj_created[BINDER_STAT_COUNT];
+ atomic_t obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
- binder_stats.obj_deleted[type]++;
+ atomic_inc(&binder_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
- binder_stats.obj_created[type]++;
+ atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
int debug_id;
+ int debug_id_done;
int call_type;
int from_proc;
int from_thread;
@@ -200,11 +229,14 @@ struct binder_transaction_log_entry {
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
const char *context_name;
};
struct binder_transaction_log {
- int next;
- int full;
+ atomic_t cur;
+ bool full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
@@ -214,19 +246,26 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+ unsigned int cur = atomic_inc_return(&log->cur);
- e = &log->entry[log->next];
- memset(e, 0, sizeof(*e));
- log->next++;
- if (log->next == ARRAY_SIZE(log->entry)) {
- log->next = 0;
+ if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
- }
+ e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+ WRITE_ONCE(e->debug_id_done, 0);
+ /*
+ * write-barrier to synchronize access to e->debug_id_done.
+ * We make sure the initialized 0 value is seen before
+	 * the other fields are zeroed by memset().
+ */
+ smp_wmb();
+ memset(e, 0, sizeof(*e));
return e;
}
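
A hedged sketch of the matching reader side: it must pair the smp_wmb() above with read barriers and re-check debug_id_done around the copy (the function name is illustrative):

	/* Hedged sketch: a snapshot of an entry is consistent only if
	 * debug_id_done was nonzero before the copy and unchanged
	 * after; the barriers pair with the smp_wmb() above.
	 */
	static bool log_entry_snapshot(struct binder_transaction_log_entry *e,
				       struct binder_transaction_log_entry *copy)
	{
		int done = READ_ONCE(e->debug_id_done);

		smp_rmb();
		*copy = *e;
		smp_rmb();
		return done && done == READ_ONCE(e->debug_id_done);
	}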
struct binder_context {
struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+
kuid_t binder_context_mgr_uid;
const char *name;
};
@@ -237,11 +276,20 @@ struct binder_device {
struct binder_context context;
};
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
struct list_head entry;
+
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -249,8 +297,72 @@ struct binder_work {
} type;
};
+struct binder_error {
+ struct binder_work work;
+ uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ * (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ * (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ * (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ * (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ * (invariant after initialized)
+ * @refs: list of references on this node
+ * (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ * initiating a transaction
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @tmp_refs: temporary kernel refs
+ * (protected by @proc->inner_lock while @proc
+ * is valid, and by binder_dead_nodes_lock
+ * if @proc is NULL. During inc/dec and node release
+ * it is also protected by @lock to provide safety
+ * as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ * (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ * (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
+ * @accept_fds: file descriptor operations supported for node
+ * (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ * (invariant after initialized)
+ * @async_todo: list of async work items
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
int debug_id;
+ spinlock_t lock;
struct binder_work work;
union {
struct rb_node rb_node;
@@ -261,88 +373,167 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
+ int tmp_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
- unsigned has_strong_ref:1;
- unsigned pending_strong_ref:1;
- unsigned has_weak_ref:1;
- unsigned pending_weak_ref:1;
- unsigned has_async_transaction:1;
- unsigned accept_fds:1;
- unsigned min_priority:8;
+ struct {
+ /*
+ * bitfield elements protected by
+ * proc inner_lock
+ */
+ u8 has_strong_ref:1;
+ u8 pending_strong_ref:1;
+ u8 has_weak_ref:1;
+ u8 pending_weak_ref:1;
+ };
+ struct {
+ /*
+ * invariant after initialization
+ */
+ u8 accept_fds:1;
+ u8 min_priority;
+ };
+ bool has_async_transaction;
struct list_head async_todo;
};
struct binder_ref_death {
+ /**
+ * @work: worklist element for death notifications
+ * (protected by inner_lock of the proc that
+ * this ref belongs to)
+ */
struct binder_work work;
binder_uintptr_t cookie;
};
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+ int debug_id;
+ uint32_t desc;
+ int strong;
+ int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ * ref for deletion in binder_cleanup_ref, a non-NULL
+ * @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ * (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
- int debug_id;
+ struct binder_ref_data data;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
- uint32_t desc;
- int strong;
- int weak;
struct binder_ref_death *death;
};
-struct binder_buffer {
- struct list_head entry; /* free and allocated entries by address */
- struct rb_node rb_node; /* free entry by size or allocated entry */
- /* by address */
- unsigned free:1;
- unsigned allow_user_free:1;
- unsigned async_transaction:1;
- unsigned debug_id:29;
-
- struct binder_transaction *transaction;
-
- struct binder_node *target_node;
- size_t data_size;
- size_t offsets_size;
- size_t extra_buffers_size;
- uint8_t data[0];
-};
-
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ * (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ * this proc ordered by node->ptr
+ * (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ * (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ * (protected by @outer_lock)
+ * @waiting_threads: threads currently waiting for proc work
+ * (protected by @inner_lock)
+ * @pid: PID of group_leader of process
+ * (invariant after initialized)
+ * @tsk: task_struct for group_leader of process
+ * (invariant after initialized)
+ * @files: files_struct for process
+ * (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ * (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ * (protected by binder_deferred_lock)
+ * @is_dead: process is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @inner_lock)
+ * @todo: list of work for this process
+ * (protected by @inner_lock)
+ * @wait: wait queue head to wait for proc work
+ * (invariant after initialized)
+ * @stats: per-process binder statistics
+ * (atomics, no lock needed)
+ * @delivered_death: list of delivered death notifications
+ * (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ * (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ * yet started. In current implementation, can
+ * only be 0 or 1.
+ * (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ * (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ * (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ * (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ * (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock: no nesting under inner or node lock
+ * Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
+ struct list_head waiting_threads;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
-
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
+ bool is_dead;
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -350,10 +541,13 @@ struct binder_proc {
int max_threads;
int requested_threads;
int requested_threads_started;
- int ready_threads;
+ int tmp_ref;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
+ spinlock_t inner_lock;
+ spinlock_t outer_lock;
};
enum {
@@ -362,22 +556,58 @@ enum {
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+ BINDER_LOOPER_STATE_POLL = 0x20,
};
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ * (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ * (protected by @proc->inner_lock)
+ * @waiting_thread_node: element for @proc->waiting_threads list
+ * (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ * (invariant after initialization)
+ * @looper: bitmap of looping state
+ * (only accessed by this thread)
+ * @looper_need_return: looping thread needs to exit driver
+ * (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ * (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ * (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ * (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ * (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ * (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
+ struct list_head waiting_thread_node;
int pid;
- int looper;
+ int looper; /* only modified by this thread */
+ bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- uint32_t return_error; /* Write failed, return error code in read buf */
- uint32_t return_error2; /* Write failed, return error code in read */
- /* buffer. Used when sending a reply to a dead process that */
- /* we are also waiting on */
+ struct binder_error return_error;
+ struct binder_error reply_error;
wait_queue_head_t wait;
struct binder_stats stats;
+ atomic_t tmp_ref;
+ bool is_dead;
};
struct binder_transaction {
@@ -397,10 +627,253 @@ struct binder_transaction {
long priority;
long saved_priority;
kuid_t sender_euid;
+ /**
+ * @lock: protects @from, @to_proc, and @to_thread
+ *
+ * @from, @to_proc, and @to_thread can be set to NULL
+ * during thread teardown
+ */
+ spinlock_t lock;
};
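
Since @from may be NULLed during teardown, a sender can only be sampled under @lock and pinned with a temporary reference; a hedged sketch (the function name is illustrative):

	/* Hedged sketch: sample t->from under t->lock and pin the
	 * thread via tmp_ref so it stays valid after the unlock.
	 */
	static struct binder_thread *binder_txn_from_sketch(
			struct binder_transaction *t)
	{
		struct binder_thread *from;

		spin_lock(&t->lock);
		from = t->from;
		if (from)
			atomic_inc(&from->tmp_ref);
		spin_unlock(&t->lock);
		return from;
	}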
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc: struct binder_proc to release
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_node
+ * @node: struct binder_node to release
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc is non-NULL, also acquires
+ * proc->inner_lock. Used to protect binder_node fields
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node: struct binder_node to release
+ *
+ * Release locks acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+ return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc: binder_proc associated with list
+ * @list: list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+ struct list_head *list)
+{
+ bool ret;
+
+ binder_inner_proc_lock(proc);
+ ret = binder_worklist_empty_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+ struct list_head *target_list)
+{
+ BUG_ON(target_list == NULL);
+ BUG_ON(work->entry.next && !list_empty(&work->entry));
+ list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
+{
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+ list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(work);
+ binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ w = list_first_entry_or_null(list, struct binder_work, entry);
+ if (w)
+ list_del_init(&w->entry);
+ return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc: binder_proc associated with list
+ * @list: list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer to the dequeued binder_work, or NULL if list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+ struct binder_proc *proc,
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return w;
+}
+
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
@@ -451,462 +924,159 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
return retval;
}
-static inline void binder_lock(const char *tag)
-{
- trace_binder_lock(tag);
- mutex_lock(&binder_main_lock);
- trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
- trace_binder_unlock(tag);
- mutex_unlock(&binder_main_lock);
-}
-
-static void binder_set_nice(long nice)
-{
- long min_nice;
-
- if (can_nice(current, nice)) {
- set_user_nice(current, nice);
- return;
- }
- min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
- binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
- current->pid, nice, min_nice);
- set_user_nice(current, min_nice);
- if (min_nice <= MAX_NICE)
- return;
- binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
-}
-
-static size_t binder_buffer_size(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+ bool do_proc_work)
{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return !binder_worklist_empty_ilocked(&thread->todo) ||
+ thread->looper_need_return ||
+ (do_proc_work &&
+ !binder_worklist_empty_ilocked(&thread->proc->todo));
}
-static void binder_insert_free_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
+ bool has_work;
- BUG_ON(!new_buffer->free);
+ binder_inner_proc_lock(thread->proc);
+ has_work = binder_has_work_ilocked(thread, do_proc_work);
+ binder_inner_proc_unlock(thread->proc);
- new_buffer_size = binder_buffer_size(proc, new_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
- proc->pid, new_buffer_size, new_buffer);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ return has_work;
}
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
-
- BUG_ON(new_buffer->free);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (new_buffer < buffer)
- p = &parent->rb_left;
- else if (new_buffer > buffer)
- p = &parent->rb_right;
- else
- BUG();
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+ return !thread->transaction_stack &&
+ binder_worklist_empty_ilocked(&thread->todo) &&
+ (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+ BINDER_LOOPER_STATE_REGISTERED));
}
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- uintptr_t user_ptr)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+ bool sync)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
-
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
+ struct rb_node *n;
+ struct binder_thread *thread;
- if (kern_ptr < buffer)
- n = n->rb_left;
- else if (kern_ptr > buffer)
- n = n->rb_right;
- else
- return buffer;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ binder_available_for_proc_work_ilocked(thread)) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
+ else
+ wake_up_interruptible(&thread->wait);
+ }
}
- return NULL;
}
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc: process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return: If there's a thread currently waiting for process work,
+ * returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
{
- void *page_addr;
- unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
- allocate ? "allocate" : "free", start, end);
-
- if (end <= start)
- return 0;
-
- trace_binder_update_page_range(proc, allocate, start, end);
-
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(proc->tsk);
-
- if (mm) {
- down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- proc->pid);
- vma = NULL;
- }
- }
-
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- proc->pid);
- goto err_no_vma;
- }
-
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
+ struct binder_thread *thread;
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ assert_spin_locked(&proc->inner_lock);
+ thread = list_first_entry_or_null(&proc->waiting_threads,
+ struct binder_thread,
+ waiting_thread_node);
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
- proc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
- proc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- proc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- /* vm_insert_page does not seem to increment the refcount */
- }
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return 0;
+ if (thread)
+ list_del_init(&thread->waiting_thread_node);
-free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE);
-err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
-err_alloc_page_failed:
- ;
- }
-err_no_vma:
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return -ENOMEM;
+ return thread;
}
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc: process to wake up a thread in
+ * @thread: specific thread to wake-up (may be NULL)
+ * @sync: whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+ struct binder_thread *thread,
+ bool sync)
{
- struct rb_node *n = proc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
- size_t size, data_offsets_size;
-
- if (proc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- proc->pid);
- return NULL;
- }
-
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
-
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
- proc->pid, data_size, offsets_size);
- return NULL;
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
- proc->pid, extra_buffers_size);
- return NULL;
- }
- if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- proc->pid, size);
- return NULL;
- }
+ assert_spin_locked(&proc->inner_lock);
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
- }
- }
- if (best_fit == NULL) {
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- proc->pid, size);
- return NULL;
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_buffer_size(proc, buffer);
- }
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
- proc->pid, size, buffer, buffer_size);
-
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
+ if (thread) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
else
- buffer_size = size + sizeof(struct binder_buffer);
- }
- end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- if (binder_update_page_range(proc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
- return NULL;
-
- rb_erase(best_fit, &proc->free_buffers);
- buffer->free = 0;
- binder_insert_allocated_buffer(proc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(proc, new_buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
- proc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->async_transaction = is_async;
- if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ wake_up_interruptible(&thread->wait);
+ return;
}
- return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ /* Didn't find a thread waiting for proc work; this can happen
+ * in two scenarios:
+ * 1. All threads are busy handling transactions
+ * In that case, one of those threads should call back into
+ * the kernel driver soon and pick up this work.
+ * 2. Threads are using the (e)poll interface, in which case
+ * they may be blocked on the waitqueue without having been
+ * added to waiting_threads. For this case, we just iterate
+ * over all threads not handling transaction work, and
+ * wake them all up. We wake all because we don't know whether
+ * a thread that called into (e)poll is handling non-binder
+ * work currently.
+ */
+ binder_wakeup_poll_threads_ilocked(proc, sync);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
+ struct binder_thread *thread = binder_select_thread_ilocked(proc);
-static void binder_delete_free_buffer(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
- BUG_ON(proc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
- BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
-
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
- }
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
- proc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(proc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
- }
+ binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
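
As a minimal sketch of the select-then-wake contract documented above (a hypothetical caller; the example_ name is invented for illustration and is not part of this patch):

	/* Illustrative only: queue work and wake a thread under the inner lock. */
	static void example_queue_and_wake(struct binder_proc *proc,
					   struct binder_work *w)
	{
		struct binder_thread *thread;

		binder_inner_proc_lock(proc);
		/* Pick a waiting thread first, per binder_select_thread_ilocked() */
		thread = binder_select_thread_ilocked(proc);
		/* Queue the work where that thread (or any thread) will find it */
		binder_enqueue_work_ilocked(w, thread ? &thread->todo : &proc->todo);
		/* Always wake the thread the selector returned */
		binder_wakeup_thread_ilocked(proc, thread, true /* sync */);
		binder_inner_proc_unlock(proc);
	}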
-static void binder_free_buf(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_set_nice(long nice)
{
- size_t size, buffer_size;
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
- proc->pid, buffer, size, buffer_size);
-
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
- if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
+ long min_nice;
- binder_update_page_range(proc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
-
- if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
- binder_delete_free_buffer(proc, next);
- }
- }
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
-
- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
}
- binder_insert_free_buffer(proc, buffer);
+ min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: nice value %ld not allowed use %ld instead\n",
+ current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice <= MAX_NICE)
+ return;
+ binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+ assert_spin_locked(&proc->inner_lock);
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -914,21 +1084,46 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure the node stays alive until
+ * the call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+
+ assert_spin_locked(&proc->inner_lock);
while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -936,33 +1131,74 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
+
return node;
}
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
- struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ struct flat_binder_object *fp)
{
+ struct binder_node *node;
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (!new_node)
+ return NULL;
+ binder_inner_proc_lock(proc);
+ node = binder_init_node_ilocked(proc, new_node, fp);
+ binder_inner_proc_unlock(proc);
+ if (node != new_node)
+ /*
+ * The node was already added by another thread
+ */
+ kfree(new_node);
+
+ return node;
+}
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
+{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -978,8 +1214,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -990,58 +1226,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
- wake_up_interruptible(&node->proc->wait);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
+ return false;
+}
- return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
+}
+
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take a reference on the node to prevent it from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (e.g. traversing the dead node list
+ * to print nodes).
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node() to check if all refcounts are 0
+ * and cleanup is needed. Calling with strong=0 and internal=1
+ * causes no actual reference to be released in binder_dec_node().
+ * If that changes, a change is needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
}
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
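
A minimal usage sketch of the tmp_ref get/put pairing described above (hypothetical caller; the example_ name is invented for illustration and is not part of this patch):

	/* Illustrative only: binder_get_node() returns with a tmp_ref held. */
	static void example_use_node(struct binder_proc *proc, binder_uintptr_t ptr)
	{
		struct binder_node *node = binder_get_node(proc, ptr);

		if (!node)
			return;
		/* node cannot be freed here, even if it goes dead meanwhile */
		binder_put_node(node);	/* drops the tmp_ref; may free node */
	}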
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1049,11 +1396,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1063,14 +1410,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
return NULL;
}
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * it into the given proc rb_trees and node refs list.
+ *
+ * Return: the ref for node. It is possible that another thread
+ * allocated/initialized the ref first, in which case the
+ * returned ref would be different from the passed-in
+ * new_ref. new_ref must be kfree'd by the caller in
+ * this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
- struct binder_context *context = proc->context;
+ struct binder_ref *ref;
+ struct rb_node *n;
while (*p) {
parent = *p;
@@ -1083,22 +1450,22 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}
p = &proc->refs_by_desc.rb_node;
@@ -1106,121 +1473,423 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref. @ref->proc->outer_lock must be held on entry.
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
- }
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}
-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node.
+ *
+ * Return: the binder_node, or NULL if the ref was not found or was
+ * only weak when a strong reference was required
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
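
For illustration, a sketch of looking up a node by handle and releasing the implicit reference (hypothetical caller, not part of this patch):

	/* Illustrative only. */
	static void example_lookup_by_handle(struct binder_proc *proc, u32 desc)
	{
		struct binder_ref_data rdata;
		struct binder_node *node;

		node = binder_get_node_from_ref(proc, desc, true /* strong */,
						&rdata);
		if (!node)
			return;
		pr_info("ref %d desc %d -> node %d\n",
			rdata.debug_id, rdata.desc, node->debug_id);
		binder_put_node(node);	/* release the implicit tmp_ref */
	}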
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
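
The cleanup/free split above exists so the kfree() happens outside the proc spinlock; a sketch of the intended pattern (hypothetical caller, not part of this patch):

	/* Illustrative only: decrement under the lock, free outside it. */
	static void example_drop_weak_ref(struct binder_proc *proc, u32 desc)
	{
		struct binder_ref *ref;
		bool delete_ref = false;

		binder_proc_lock(proc);
		ref = binder_get_ref_olocked(proc, desc, false /* weak ok */);
		if (ref)
			delete_ref = binder_dec_ref_olocked(ref, 0 /* weak */);
		binder_proc_unlock(proc);
		if (delete_ref)
			binder_free_ref(ref);	/* kfree outside the spinlock */
	}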
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to the @increment arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
+ }
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
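
As a sketch of how a matched strong acquire/release might use these helpers (hypothetical caller; the pairing shown is an assumption for illustration, not something demonstrated in this hunk):

	/* Illustrative only. */
	static int example_acquire_then_release(struct binder_proc *proc, u32 desc)
	{
		struct binder_ref_data rdata;
		int ret;

		/* strong increment */
		ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
		if (ret)
			return ret;
		/* matching strong decrement; may free the ref and its node */
		return binder_dec_ref_for_handle(proc, desc, true, &rdata);
	}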
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
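
A sketch of granting a process a handle to a node via the helper above (hypothetical caller, not part of this patch); note the helper itself handles the allocate-outside-the-lock retry:

	/* Illustrative only. */
	static int example_give_ref(struct binder_proc *target_proc,
				    struct binder_node *node)
	{
		struct binder_ref_data rdata;
		int ret;

		ret = binder_inc_ref_for_node(target_proc, node, true /* strong */,
					      NULL, &rdata);
		if (!ret)
			pr_info("handle %d now refers to node %d\n",
				rdata.desc, node->debug_id);
		return ret;
	}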
+
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ assert_spin_locked(&target_thread->proc->inner_lock);
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free the thread if appropriate (the thread has
+ * been released and no transaction is being processed by the
+ * driver).
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+ * The atomic type protects the counter value itself; the inner
+ * lock is still needed to make the final thread->is_dead and
+ * tmp_ref check race-free against thread release.
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
+ }
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or when the binder_proc is in use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (the proc has been released, all threads have
+ * been released, and it is not currently in use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
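
The counterpart increment is open-coded under the inner lock; a sketch of the full hold/release pattern (hypothetical caller, not part of this patch):

	/* Illustrative only. */
	static void example_hold_proc(struct binder_proc *proc)
	{
		binder_inner_proc_lock(proc);
		proc->tmp_ref++;	/* keep proc alive across the unlock */
		binder_inner_proc_unlock(proc);
		/* ... safe to use proc here without holding any locks ... */
		binder_proc_dec_tmpref(proc);	/* may free a released proc */
	}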
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_thread_dec_tmpref() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
}
- t->need_reply = 0;
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
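
A sketch of the unlock-then-dec discipline the kernel-doc above requires (hypothetical caller, not part of this patch):

	/* Illustrative only. */
	static void example_touch_sender(struct binder_transaction *t)
	{
		struct binder_thread *from;

		from = binder_get_txn_from_and_acq_inner(t);
		if (!from)
			return;	/* sender already gone */
		/* ... operate on 'from' while holding from->proc inner lock ... */
		binder_inner_proc_unlock(from->proc);
		binder_thread_dec_tmpref(from);	/* drop the temporary reference */
	}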
+
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1235,30 +1904,28 @@ static void binder_send_failed_reply(struct binder_transaction *t,
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
-
- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1267,7 +1934,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
"send failed reply for transaction %d, target dead\n",
t->debug_id);
- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1476,24 +2143,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
- struct binder_ref *ref;
+ struct binder_ref_data rdata;
+ int ret;
fp = to_flat_binder_object(hdr);
- ref = binder_get_ref(proc, fp->handle,
- hdr->type == BINDER_TYPE_HANDLE);
- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);
+
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
@@ -1532,7 +2201,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1564,102 +2234,122 @@ static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_thread *thread)
{
struct binder_node *node;
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)fp->binder,
node->debug_id, (u64)fp->cookie,
(u64)node->cookie);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
- ref = binder_get_ref_for_node(target_proc, node);
- if (!ref)
- return -EINVAL;
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
- fp->handle = ref->desc;
+ fp->handle = rdata.desc;
fp->cookie = 0;
- binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
- trace_binder_transaction_node_to_ref(t, node, ref);
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
-
- return 0;
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
- ref = binder_get_ref(proc, fp->handle,
- fp->hdr.type == BINDER_TYPE_HANDLE);
- if (!ref) {
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- if (ref->node->proc == target_proc) {
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
else
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
} else {
- struct binder_ref *new_ref;
+ int ret;
+ struct binder_ref_data dest_rdata;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (!new_ref)
- return -EINVAL;
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
fp->binder = 0;
- fp->handle = new_ref->desc;
+ fp->handle = dest_rdata.desc;
fp->cookie = 0;
- binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
- NULL);
- trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
}
- return 0;
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_fd(int fd,
@@ -1750,7 +2440,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,12 +2509,80 @@ static int binder_fixup_parent(struct binder_transaction *t,
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
}
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t: transaction to send
+ * @proc: process to send the transaction to
+ * @thread: thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the
+ * proc-wide todo list.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the todo list of that specific thread.
+ *
+ * Return: true if the transaction was successfully queued
+ * false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+ struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct list_head *target_list = NULL;
+ struct binder_node *node = t->buffer->target_node;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ bool wakeup = true;
+
+ BUG_ON(!node);
+ binder_node_lock(node);
+ if (oneway) {
+ BUG_ON(thread);
+ if (node->has_async_transaction) {
+ target_list = &node->async_todo;
+ wakeup = false;
+ } else {
+ node->has_async_transaction = 1;
+ }
+ }
+
+ binder_inner_proc_lock(proc);
+
+ if (proc->is_dead || (thread && thread->is_dead)) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ return false;
+ }
+
+ if (!thread && !target_list)
+ thread = binder_select_thread_ilocked(proc);
+
+ if (thread)
+ target_list = &thread->todo;
+ else if (!target_list)
+ target_list = &proc->todo;
+ else
+ BUG_ON(target_list != &node->async_todo);
+
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
+ binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+
+ return true;
+}
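
A sketch of how a caller is expected to react to the boolean result (hypothetical fragment, mirroring the usage later in this patch):

	/* Illustrative only. */
	if (!binder_proc_transaction(t, target_proc, NULL)) {
		/* target process or thread died: report a dead reply */
		return_error = BR_DEAD_REPLY;
	}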
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -1835,19 +2594,21 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
- struct binder_proc *target_proc;
+ struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
- struct list_head *target_list;
- wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error = 0;
+ uint32_t return_error_param = 0;
+ uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
+ int t_debug_id = atomic_inc_return(&binder_last_id);
e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -1857,29 +2618,40 @@ static void binder_transaction(struct binder_proc *proc,
e->context_name = proc->context->name;
if (reply) {
+ binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
+ binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
- binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
+ spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
+ spin_unlock(&in_reply_to->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
- target_thread = in_reply_to->from;
+ binder_inner_proc_unlock(proc);
+ binder_set_nice(in_reply_to->saved_priority);
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
@@ -1888,89 +2660,137 @@ static void binder_transaction(struct binder_proc *proc,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ binder_inc_node(ref->node, 1, 0, NULL);
+ target_node = ref->node;
+ }
+ binder_proc_unlock(proc);
+ if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- target_node = ref->node;
} else {
+ mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
+ binder_inc_node(target_node, 1, 0, NULL);
+ mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
- if (target_thread) {
+ if (target_thread)
e->to_thread = target_thread->pid;
- target_list = &target_thread->todo;
- target_wait = &target_thread->wait;
- } else {
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
- }
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = ++binder_last_id;
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2004,11 +2824,18 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction(reply, t, target_node);
- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -2016,9 +2843,6 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
@@ -2028,6 +2852,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
@@ -2035,12 +2861,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2048,6 +2878,8 @@ static void binder_transaction(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
@@ -2064,6 +2896,8 @@ static void binder_transaction(struct binder_proc *proc,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
@@ -2078,6 +2912,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2089,6 +2925,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2100,6 +2938,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
@@ -2116,6 +2956,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
@@ -2125,12 +2967,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
@@ -2146,6 +2992,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (copy_from_user(sg_bufp,
@@ -2153,12 +3001,15 @@ static void binder_transaction(struct binder_proc *proc,
bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fix up the buffer pointer to the target proc's address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2167,6 +3018,8 @@ static void binder_transaction(struct binder_proc *proc,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
@@ -2176,38 +3029,60 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_inner_proc_unlock(target_proc);
+ wake_up_interruptible_sync(&target_thread->wait);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
+ if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- if (target_node->has_async_transaction) {
- target_list = &target_node->async_todo;
- target_wait = NULL;
- } else
- target_node->has_async_transaction = 1;
- }
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait) {
- if (reply || !(t->flags & TF_ONE_WAY))
- wake_up_interruptible_sync(target_wait);
- else
- wake_up_interruptible(target_wait);
+ if (!binder_proc_transaction(t, target_proc, NULL))
+ goto err_dead_proc_or_thread;
}
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ /*
+ * Write barrier: publish the fully initialized log entry
+ * before debug_id_done, pairing with the read barrier on
+ * the side that prints the log.
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -2215,8 +3090,9 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
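+ /* the buffer release above already dropped the buffer's target_node ref */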
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2229,24 +3105,49 @@ err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node(target_node, 1, 0);
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);
{
struct binder_transaction_log_entry *fe;
+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * Write barrier: publish the fully initialized log entry
+ * before debug_id_done, pairing with the read barrier on
+ * the side that prints the log.
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}
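[Editorial aside, not part of the patch: the debug_id_done writes above need a
paired read barrier wherever the transaction log is printed. A minimal sketch
of such a reader, assuming the log-entry fields used in this series:]

	static void print_log_entry_sketch(struct seq_file *m,
					   struct binder_transaction_log_entry *e)
	{
		int debug_id = READ_ONCE(e->debug_id_done);

		/*
		 * Pairs with the smp_wmb() in binder_transaction(): the
		 * entry fields must not be read before debug_id_done.
		 */
		smp_rmb();
		seq_printf(m, "%d: transaction", e->debug_id);
		/* the entry is only trustworthy once debug_id_done caught up */
		seq_puts(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			 "\n" : " (incomplete)\n");
	}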
static int binder_thread_write(struct binder_proc *proc,
@@ -2260,15 +3161,17 @@ static int binder_thread_write(struct binder_proc *proc,
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -2276,53 +3179,61 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && context->binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- context->binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -2330,6 +3241,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
@@ -2354,13 +3266,17 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -2369,16 +3285,23 @@ static int binder_thread_write(struct binder_proc *proc,
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -2396,7 +3319,8 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- buffer = binder_buffer_lookup(proc, data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
@@ -2418,15 +3342,27 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
- else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = 0;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}
@@ -2457,6 +3393,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2470,6 +3407,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2494,7 +3432,7 @@ static int binder_thread_write(struct binder_proc *proc,
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2502,7 +3440,29 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate the death notification up front:
+ * kzalloc(GFP_KERNEL) may sleep, so it must not
+ * run under the spinlocks taken below.
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2510,6 +3470,8 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
@@ -2519,21 +3481,18 @@ static int binder_thread_write(struct binder_proc *proc,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);
+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc(sizeof(*death), GFP_KERNEL);
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2542,17 +3501,19 @@ static int binder_thread_write(struct binder_proc *proc,
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
- }
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(
+ &ref->death->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2561,22 +3522,35 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
@@ -2587,8 +3561,13 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2602,19 +3581,25 @@ static int binder_thread_write(struct binder_proc *proc,
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
}
+ binder_inner_proc_unlock(proc);
} break;
default:
@@ -2632,23 +3617,79 @@ static void binder_stat_br(struct binder_proc *proc,
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
- binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}
-static int binder_has_proc_work(struct binder_proc *proc,
- struct binder_thread *thread)
+static int binder_has_thread_work(struct binder_thread *thread)
{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(thread->proc, &thread->todo) ||
+ thread->looper_need_return;
}
-static int binder_has_thread_work(struct binder_thread *thread)
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
+}
+
+static int binder_wait_for_work(struct binder_thread *thread,
+ bool do_proc_work)
+{
+ DEFINE_WAIT(wait);
+ struct binder_proc *proc = thread->proc;
+ int ret = 0;
+
+ freezer_do_not_count();
+ binder_inner_proc_lock(proc);
+ for (;;) {
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ if (binder_has_work_ilocked(thread, do_proc_work))
+ break;
+ if (do_proc_work)
+ list_add(&thread->waiting_thread_node,
+ &proc->waiting_threads);
+ binder_inner_proc_unlock(proc);
+ schedule();
+ binder_inner_proc_lock(proc);
+ list_del_init(&thread->waiting_thread_node);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&thread->wait, &wait);
+ binder_inner_proc_unlock(proc);
+ freezer_count();
+
+ return ret;
}
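[Editorial aside: binder_wait_for_work() is an instance of the canonical
open-coded wait loop; wq and condition below are placeholders. Stripped of the
binder-specific locking, the shape is:]

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
		if (condition)			/* re-checked after every wakeup */
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;	/* caller restarts the syscall */
			break;
		}
		schedule();
	}
	finish_wait(&wq, &wait);

[The binder version additionally drops the inner lock around schedule() and
parks the thread on proc->waiting_threads so a sender can hand work to an idle
thread directly.]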
static int binder_thread_read(struct binder_proc *proc,
@@ -2670,37 +3711,15 @@ static int binder_thread_read(struct binder_proc *proc,
}
retry:
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
+ binder_inner_proc_lock(proc);
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+ binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
- if (wait_for_proc_work)
- proc->ready_threads++;
-
- binder_unlock(__func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2710,23 +3729,15 @@ retry:
binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
- binder_lock(__func__);
+ if (non_block) {
+ if (!binder_has_work(thread, wait_for_proc_work))
+ ret = -EAGAIN;
+ } else {
+ ret = binder_wait_for_work(thread, wait_for_proc_work);
+ }
- if (wait_for_proc_work)
- proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
@@ -2735,31 +3746,52 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
+
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);
- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ if (put_user(e->cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ cmd = e->cmd;
+ e->cmd = BR_OK;
+ ptr += sizeof(uint32_t);
+
+ /* report the saved cmd: e->cmd was just reset to BR_OK */
+ binder_stat_br(proc, thread, cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2769,113 +3801,134 @@ retry:
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-
- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;
+
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(node->ptr,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user(node->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
-
- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids a race where
+ * this thread frees the node while another
+ * thread is still unlocking it after the
+ * final decrement).
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);
+
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(death->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2907,8 +3960,9 @@ retry:
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -2918,18 +3972,24 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);
trace_binder_transaction_received(t);
@@ -2939,21 +3999,22 @@ retry:
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -2961,29 +4022,36 @@ retry:
done:
*consumed = ptr - buffer;
- if (proc->requested_threads + proc->ready_threads == 0 &&
+ binder_inner_proc_lock(proc);
+ if (proc->requested_threads == 0 &&
+ list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
/* user-space fails to spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;
- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -2996,11 +4064,17 @@ static void binder_release_work(struct list_head *list)
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -3027,7 +4101,8 @@ static void binder_release_work(struct list_head *list)
}
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -3042,38 +4117,99 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc(sizeof(*thread), GFP_KERNEL);
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->reply_error.cmd = BR_OK;
+ INIT_LIST_HEAD(&thread->waiting_thread_node);
+ return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread;
+ struct binder_thread *new_thread;
+
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, NULL);
+ binder_inner_proc_unlock(proc);
+ if (!thread) {
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (new_thread == NULL)
return NULL;
- binder_stats_created(BINDER_STAT_THREAD);
- thread->proc = proc;
- thread->pid = current->pid;
- init_waitqueue_head(&thread->wait);
- INIT_LIST_HEAD(&thread->todo);
- rb_link_node(&thread->rb_node, parent, p);
- rb_insert_color(&thread->rb_node, &proc->threads);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
- thread->return_error = BR_OK;
- thread->return_error2 = BR_OK;
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, new_thread);
+ binder_inner_proc_unlock(proc);
+ if (thread != new_thread)
+ kfree(new_thread);
}
return thread;
}
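[Editorial aside: binder_get_thread() now follows the usual allocate-unlocked
/ insert-locked / free-the-loser idiom, since kzalloc(GFP_KERNEL) may sleep
and must not run under the inner spinlock. The generic shape, with
lookup_or_insert() as a hypothetical helper:]

	new = kzalloc(sizeof(*new), GFP_KERNEL);	/* may sleep: no lock held */
	spin_lock(&lock);
	cur = lookup_or_insert(&root, new);		/* returns existing entry or inserts new */
	spin_unlock(&lock);
	if (cur != new)
		kfree(new);				/* lost the race, drop our copy */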
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;
+ binder_inner_proc_lock(thread->proc);
+ /*
+ * Take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding decrement happens when the
+ * thread is actually freed in binder_free_thread().
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -3094,12 +4230,16 @@ static int binder_free_thread(struct binder_proc *proc,
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3108,30 +4248,24 @@ static unsigned int binder_poll(struct file *filp,
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
- int wait_for_proc_work;
-
- binder_lock(__func__);
+ bool wait_for_proc_work;
thread = binder_get_thread(proc);
+ if (!thread)
+ return POLLERR;
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
- binder_unlock(__func__);
+ binder_inner_proc_unlock(thread->proc);
+
+ poll_wait(filp, &thread->wait, wait);
+
+ /* re-check after registering on the waitqueue to avoid a lost wakeup */
+ if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;
- if (wait_for_proc_work) {
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- poll_wait(filp, &proc->wait, wait);
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- } else {
- if (binder_has_thread_work(thread))
- return POLLIN;
- poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
- return POLLIN;
- }
return 0;
}
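[Editorial aside: a hypothetical user-space caller of the reworked poll path;
binder_fd is any descriptor opened on /dev/binder by a looper thread:]

	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		/* work pending: drain it with a BINDER_WRITE_READ read buffer */
	}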
@@ -3178,8 +4312,10 @@ static int binder_ioctl_write_read(struct file *filp,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&proc->todo))
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
@@ -3204,9 +4340,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
int ret = 0;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
-
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
@@ -3227,19 +4364,49 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (!context->binder_context_mgr_node) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- context->binder_context_mgr_node->local_weak_refs++;
- context->binder_context_mgr_node->local_strong_refs++;
- context->binder_context_mgr_node->has_strong_ref = 1;
- context->binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
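[Editorial aside: in practice the one caller of this path is servicemanager.
A hypothetical sketch of the user-space side:]

	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	/* map the transaction buffer space before claiming the role */
	mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
		/* fails with -EBUSY if another process already owns handle 0 */
		exit(1);
	}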
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info)
+{
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
+ memset(info, 0, sizeof(*info));
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_inner_proc_unlock(proc);
+
+ return 0;
+}
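[Editorial aside: a hypothetical user-space loop over the new
BINDER_GET_NODE_DEBUG_INFO ioctl. Each call returns the first node with a ptr
greater than the one passed in; when none is left, info stays zeroed:]

	binder_uintptr_t ptr = 0;

	for (;;) {
		struct binder_node_debug_info info = { .ptr = ptr };

		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0 ||
		    info.ptr == 0)
			break;
		printf("node u%016llx c%016llx s%d w%d\n",
		       (unsigned long long)info.ptr,
		       (unsigned long long)info.cookie,
		       info.has_strong_ref, info.has_weak_ref);
		ptr = info.ptr;
	}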
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -3251,13 +4418,14 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
- binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3270,12 +4438,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -3284,7 +4459,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -3301,6 +4476,24 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -3308,8 +4501,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(__func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3338,8 +4530,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -3357,10 +4548,8 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;
if (proc->tsk != current->group_leader)
return -EINVAL;
@@ -3369,8 +4558,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
@@ -3380,73 +4569,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3464,24 +4595,26 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
-
- binder_lock(__func__);
+ binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
- binder_unlock(__func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3517,16 +4650,17 @@ static void binder_deferred_flush(struct binder_proc *proc)
struct rb_node *n;
int wake_count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
- wake_up_interruptible_all(&proc->wait);
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
@@ -3547,13 +4681,21 @@ static int binder_node_release(struct binder_node *node, int refs)
{
struct binder_ref *ref;
int death = 0;
+ struct binder_proc *proc = node->proc;
- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);
- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /* The caller must have taken a temporary ref on the node. */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);
return refs;
}
@@ -3561,45 +4703,58 @@ static int binder_node_release(struct binder_node *node, int refs)
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }
death++;
- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);
return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->vma);
BUG_ON(proc->files);
+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node &&
context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3607,15 +4762,25 @@ static void binder_deferred_release(struct binder_proc *proc)
__func__, proc->pid);
context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;
+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}
nodes = 0;
@@ -3625,73 +4790,42 @@ static void binder_deferred_release(struct binder_proc *proc)
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * Take a temporary ref on the node before calling
+ * binder_node_release(), which will either free the
+ * node or drop the ref via binder_put_node().
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- vfree(proc->buffer);
- }
-
- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);
- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
@@ -3702,7 +4836,6 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
- binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
@@ -3729,7 +4862,6 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- binder_unlock(__func__);
if (files)
put_files_struct(files);
} while (proc);
@@ -3749,41 +4881,51 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
mutex_unlock(&binder_deferred_lock);
}
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}
-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3791,8 +4933,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3817,40 +4967,46 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
}
}
-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -3860,27 +5016,34 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ binder_node_lock(ref->node);
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -3890,36 +5053,60 @@ static void print_binder_proc(struct seq_file *m,
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
- }
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -3985,17 +5172,21 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -4003,11 +5194,15 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
- if (stats->obj_created[i] || stats->obj_deleted[i])
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
binder_objstat_strings[i],
- stats->obj_created[i] - stats->obj_deleted[i],
- stats->obj_created[i]);
+ created - deleted,
+ created);
}
}
@@ -4015,51 +5210,61 @@ static void print_binder_proc_stats(struct seq_file *m,
struct binder_proc *proc)
{
struct binder_work *w;
+ struct binder_thread *thread;
struct rb_node *n;
- int count, strong, weak;
+ int count, strong, weak, ready_threads;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ ready_threads = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
+
+ list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+ ready_threads++;
+
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
@@ -4070,57 +5275,67 @@ static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
+ spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
-
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
@@ -4128,44 +5343,63 @@ static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
}
}
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->context_name,
- e->to_node, e->target_handle, e->data_size, e->offsets_size);
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read-barrier to guarantee read of debug_id_done after
+ * done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
int i;
- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
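+	/*
+	 * log->cur is the index of the most recent entry; derive how many
+	 * entries are valid and where the oldest entry sits in the ring.
+	 */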
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}
@@ -4200,6 +5434,7 @@ static int __init init_binder_device(const char *name)
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
@@ -4215,10 +5450,15 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
- char *device_name, *device_names;
+ char *device_name, *device_names, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);
+
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4263,7 +5503,8 @@ static int __init binder_init(void)
}
strcpy(device_names, binder_devices_param);
- while ((device_name = strsep(&device_names, ","))) {
+ device_tmp = device_names;
+ while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
@@ -4277,6 +5518,9 @@ err_init_binder_device_failed:
hlist_del(&device->hlist);
kfree(device);
}
+
+ kfree(device_names);
+
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 000000000000..8fe165844e47
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,1009 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/sched/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list_lru.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+struct list_lru binder_alloc_lru;
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
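+/*
+ * A buffer's size is implicit: it runs from buffer->data to the start
+ * of the next buffer in the address-ordered list, or to the end of the
+ * mapped area for the last buffer.
+ */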
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer->data < buffer->data)
+ p = &parent->rb_left;
+ else if (new_buffer->data > buffer->data)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ void *kern_ptr;
+
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer->data)
+ n = n->rb_left;
+ else if (kern_ptr > buffer->data)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc:     binder_alloc for this proc
+ * @user_ptr:  User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return the buffer
+ * corresponding to that user pointer. Searches the rb tree for a
+ * buffer that matches the user data pointer.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct binder_lru_page *page;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (allocate == 0)
+ goto free_range;
+
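+	/* a userspace vma/mm is only needed if some page still needs allocating */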
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ if (!vma && need_mm)
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (!vma && need_mm) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ bool on_lru;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+
+ trace_binder_alloc_page_end(alloc, index);
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
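+	/*
+	 * Note that freed pages are not returned to the system here; they
+	 * are placed on the binder LRU so the shrinker can reclaim them
+	 * lazily under memory pressure (see binder_alloc_free_page()).
+	 */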
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
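+	/*
+	 * The labels below are entered from the allocate loop above when a
+	 * step fails for the current page; after cleaning up that page, the
+	 * walk continues backwards, returning the pages mapped so far to
+	 * the LRU.
+	 */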
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+err_alloc_page_failed:
+err_page_ptr_cleared:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
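+	/* best-fit search of the size-ordered free_buffers tree */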
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
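+	/*
+	 * If the loop ended without an exact size match (n == NULL),
+	 * best_fit is the smallest free buffer that is big enough.
+	 */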
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ WARN_ON(n && buffer_size != size);
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
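+	/* if the buffer is larger than needed, split off the tail as a new free buffer */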
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer;
+
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr, NULL);
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+ * Return: The allocated buffer or an ERR_PTR(-errno) on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+}
+
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+}
+
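+/*
+ * When a free buffer is merged away, the page holding its start may
+ * still be shared with a neighbouring buffer; only release that page
+ * when no neighbour uses it.
+ */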
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+	bool to_free = true;
+
+	BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = binder_buffer_prev(buffer);
+ BUG_ON(!prev->free);
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
+ }
+ }
+
+ if (PAGE_ALIGNED(buffer->data)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
+ }
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE,
+ NULL);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
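+	/* coalesce with the next and previous buffers if they are free */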
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = binder_buffer_next(buffer);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buf()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
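+	/* reserve a matching range of kernel VAs; pages are mapped in lazily */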
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__, alloc->pid, vma->vm_start,
+ vma->vm_end, alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+			       sizeof(alloc->pages[0]),
+			       GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
+ }
+
+ buffer->data = alloc->buffer;
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
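+	/*
+	 * Compiler barrier: the intent here appears to be ensuring the
+	 * allocator state above is written before alloc->vma is published,
+	 * since a non-NULL vma is what marks the mapping as usable.
+	 */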
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_buf_struct_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+ struct binder_buffer *buffer;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+ bool on_lru;
+
+ if (!alloc->pages[i].page_ptr)
+ continue;
+
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i].page_ptr);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+
+ alloc = page->alloc;
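+	/* shrinker context: never block, skip the page if the lock is contended */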
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ if (alloc->vma) {
+ mm = get_task_mm(alloc->tsk);
+ if (!mm)
+ goto err_get_task_mm_failed;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(alloc->vma,
+ page_addr + alloc->user_buffer_offset,
+ PAGE_SIZE);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ list_lru_isolate(lru, item);
+
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+ mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	return list_lru_count(&binder_alloc_lru);
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			     NULL, sc->nr_to_scan);
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+}
+
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 000000000000..a3a3602c689c
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list_lru.h>
+
+extern struct list_lru binder_alloc_lru;
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry: entry in alloc->buffers list
+ * @rb_node: node for allocated_buffers/free_buffers rb trees
+ * @free: %true if buffer is free
+ * @allow_user_free: %true if user is allowed to free the buffer
+ * @async_transaction: %true if buffer is in use for an async transaction
+ * @free_in_progress: %true if a buffer free is in progress
+ * @debug_id: unique ID for debugging
+ * @transaction: pointer to associated struct binder_transaction
+ * @target_node: struct binder_node associated with this buffer
+ * @data_size: size of @transaction data
+ * @offsets_size: size of array of offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data: pointer to base of buffer space
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @mutex: protects binder_alloc fields
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @tsk: task_struct of the process that called init for this proc
+ * (invariant after init)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of binder_lru_page
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct mutex mutex;
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct binder_lru_page *pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ int pid;
+};
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ mutex_lock(&alloc->mutex);
+ free_async_space = alloc->free_async_space;
+ mutex_unlock(&alloc->mutex);
+ return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+ /*
+ * user_buffer_offset is constant if vma is set and
+ * undefined if vma is not set. It is possible to
+ * get here with !alloc->vma if the target process
+ * is dying while a transaction is being initiated.
+ * Returning the old value is ok in this case and
+ * the transaction will fail.
+ */
+ return alloc->user_buffer_offset;
+}
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+		/*
+		 * An error message on a free page can be a false positive
+		 * if the binder shrinker ran during the binder_alloc_free_buf
+		 * calls above.
+		 */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
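
The free-order enumeration above is ordinary recursive permutation generation: is_dup() rejects values already placed, and the recursion bottoms out once all BUFFER_NUM slots are filled. A minimal user-space sketch of the same logic (BUFFER_NUM is assumed to be 5 here, matching the selftest's constant):

    #include <stdio.h>
    #include <stdbool.h>

    #define BUFFER_NUM 5        /* assumed to match the selftest */

    static bool is_dup(const int *seq, int index, int val)
    {
            int i;

            for (i = 0; i < index; i++)
                    if (seq[i] == val)
                            return true;
            return false;
    }

    /* Visit every permutation of 0..BUFFER_NUM-1, exactly as
     * binder_selftest_free_seq() walks BUFFER_NUM! free orders. */
    static void gen_seq(int *seq, int index)
    {
            int i;

            if (index == BUFFER_NUM) {
                    for (i = 0; i < BUFFER_NUM; i++)
                            printf("%d ", seq[i]);
                    printf("\n");
                    return;
            }
            for (i = 0; i < BUFFER_NUM; i++) {
                    if (is_dup(seq, index, i))
                            continue;
                    seq[index] = i;
                    gen_seq(seq, index + 1);
            }
    }

    int main(void)
    {
            int seq[BUFFER_NUM];

            gen_seq(seq, 0);    /* prints 5! = 120 orders */
            return 0;
    }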
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..76e3b9c8a8a2 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
- struct binder_ref *ref),
- TP_ARGS(t, node, ref),
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
);
TRACE_EVENT(binder_transaction_ref_to_node,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
- TP_ARGS(t, ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
- __entry->node_debug_id = ref->node->debug_id;
- __entry->node_ptr = ref->node->ptr;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
+ __entry->node_debug_id = node->debug_id;
+ __entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
);
TRACE_EVENT(binder_transaction_ref_to_ref,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
- struct binder_ref *dest_ref),
- TP_ARGS(t, src_ref, dest_ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *src_ref,
+ struct binder_ref_data *dest_ref),
+ TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->node_debug_id = src_ref->node->debug_id;
+ __entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
- TP_PROTO(struct binder_proc *proc, bool allocate,
+ TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
- TP_ARGS(proc, allocate, start, end),
+ TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
__field(size_t, size)
),
TP_fast_assign(
- __entry->proc = proc->pid;
+ __entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
@@ -288,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
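
Each DEFINE_EVENT above stamps out a trace_<name>() call that the allocator can emit, and the *_start/*_end pairs are meant to bracket the corresponding operation. A hedged sketch of a call site — the real ones live in binder_alloc.c, not shown in this hunk, and the page index is illustrative:

    size_t index = 0;   /* illustrative page index */

    /* Bracket moving a freed page onto the binder lru. */
    trace_binder_free_lru_start(alloc, index);
    list_lru_add(&binder_alloc_lru, &alloc->pages[index].lru);
    trace_binder_free_lru_end(alloc, index);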
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 7a8b8fb2f572..df126dcdaf18 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -877,21 +877,21 @@ static void lcd_clear_fast_tilcd(struct charlcd *charlcd)
spin_unlock_irq(&pprt_lock);
}
-static struct charlcd_ops charlcd_serial_ops = {
+static const struct charlcd_ops charlcd_serial_ops = {
.write_cmd = lcd_write_cmd_s,
.write_data = lcd_write_data_s,
.clear_fast = lcd_clear_fast_s,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_parallel_ops = {
+static const struct charlcd_ops charlcd_parallel_ops = {
.write_cmd = lcd_write_cmd_p8,
.write_data = lcd_write_data_p8,
.clear_fast = lcd_clear_fast_p8,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_tilcd_ops = {
+static const struct charlcd_ops charlcd_tilcd_ops = {
.write_cmd = lcd_write_cmd_tilcd,
.write_data = lcd_write_data_tilcd,
.clear_fast = lcd_clear_fast_tilcd,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index b67263d6e34b..c0a5b1f3a986 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -67,7 +67,7 @@ static char *applicom_pci_devnames[] = {
"PCI2000PFB"
};
-static struct pci_device_id applicom_pci_tbl[] = {
+static const struct pci_device_id applicom_pci_tbl[] = {
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index 8c5411a8f33f..691f5898bb32 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -128,10 +128,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
{
int bRC = -EIO;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
@@ -148,7 +149,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
pSettings->usDspIRQ = usSI & 0x00FF;
pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
- if ((usDI & 0x00FF) < numDspBases) {
+ if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) {
pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
} else {
pSettings->usDspBaseIO = 0;
@@ -176,7 +177,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
pSettings->usUartIRQ = usSI & 0x000F;
- if (((usSI & 0xFF00) >> 8) < numUartBases) {
+ if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
} else {
pSettings->usUartBaseIO = 0;
@@ -205,15 +206,16 @@ int smapi_set_DSP_cfg(void)
int bRC = -EIO;
int i;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 };
- unsigned short ausUartIrqs[] = { 3, 4 };
-
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
- unsigned short numDspIrqs = 5;
- unsigned short numUartIrqs = 2;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+ static const unsigned short ausDspIrqs[] = {
+ 5, 7, 10, 11, 15 };
+ static const unsigned short ausUartIrqs[] = {
+ 3, 4 };
+
unsigned short dspio_index = 0, uartio_index = 0;
PRINTK_5(TRACE_SMAPI,
@@ -221,11 +223,11 @@ int smapi_set_DSP_cfg(void)
mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
if (mwave_3780i_io) {
- for (i = 0; i < numDspBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
if (mwave_3780i_io == ausDspBases[i])
break;
}
- if (i == numDspBases) {
+ if (i == ARRAY_SIZE(ausDspBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
return bRC;
}
@@ -233,22 +235,22 @@ int smapi_set_DSP_cfg(void)
}
if (mwave_3780i_irq) {
- for (i = 0; i < numDspIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) {
if (mwave_3780i_irq == ausDspIrqs[i])
break;
}
- if (i == numDspIrqs) {
+ if (i == ARRAY_SIZE(ausDspIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
return bRC;
}
}
if (mwave_uart_io) {
- for (i = 0; i < numUartBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) {
if (mwave_uart_io == ausUartBases[i])
break;
}
- if (i == numUartBases) {
+ if (i == ARRAY_SIZE(ausUartBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
return bRC;
}
@@ -257,11 +259,11 @@ int smapi_set_DSP_cfg(void)
if (mwave_uart_irq) {
- for (i = 0; i < numUartIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) {
if (mwave_uart_irq == ausUartIrqs[i])
break;
}
- if (i == numUartIrqs) {
+ if (i == ARRAY_SIZE(ausUartIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
return bRC;
}
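
The smapi.c conversion replaces hand-maintained counts (numDspBases and friends) with ARRAY_SIZE(), so the loop bound can never drift from the initializer, and makes the tables static const so they live in rodata instead of being rebuilt on the stack on every call. The idiom in isolation:

    #include <linux/kernel.h>   /* ARRAY_SIZE() */

    static const unsigned short ausUartIrqs[] = { 3, 4 };

    static bool uart_irq_is_valid(unsigned short irq)
    {
            int i;

            /* ARRAY_SIZE() expands to sizeof(arr) / sizeof((arr)[0]),
             * with a compile-time check that arr is a real array
             * rather than a pointer. */
            for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++)
                    if (irq == ausUartIrqs[i])
                            return true;
            return false;
    }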
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 3e73bcdf9e65..d256110ba672 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -101,9 +101,6 @@ static DEFINE_IDA(ida_index);
#define PP_BUFFER_SIZE 1024
#define PARDEVICE_MAX 8
-/* ROUND_UP macro from fs/select.c */
-#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
-
static DEFINE_MUTEX(pp_do_mutex);
/* define fixed sized ioctl cmd for y2038 migration */
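
The ppdev hunk drops a private ROUND_UP() copy, which appears to have no remaining users in this file. Callers that do need ceiling division should use the kernel-wide DIV_ROUND_UP() from <linux/kernel.h>, which computes the same thing:

    #include <linux/kernel.h>

    static unsigned int buffers_needed(unsigned int bytes, unsigned int bufsz)
    {
            /* Same ceiling division the removed macro performed:
             * (bytes + bufsz - 1) / bufsz */
            return DIV_ROUND_UP(bytes, bufsz);
    }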
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 572a51704e67..6210bff46341 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -766,7 +766,7 @@ static struct attribute *tlclk_sysfs_entries[] = {
NULL
};
-static struct attribute_group tlclk_attribute_group = {
+static const struct attribute_group tlclk_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = tlclk_sysfs_entries,
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 4d229dde6522..23f33f95d4a6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1308,7 +1308,7 @@ static struct attribute *port_sysfs_entries[] = {
NULL
};
-static struct attribute_group port_attribute_group = {
+static const struct attribute_group port_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = port_sysfs_entries,
};
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 3e6b23c3453c..067396bedf22 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -86,8 +86,7 @@
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/uaccess.h>
#ifdef CONFIG_OF
@@ -222,6 +221,8 @@ static const struct config_registers v6_config_registers = {
* hwicap_command_desync - Send a DESYNC command to the ICAP port.
* @drvdata: a pointer to the drvdata.
*
+ * Returns: '0' on success and an error value on failure
+ *
* This command desynchronizes the ICAP After this command, a
* bitstream containing a NULL packet, followed by a SYNCH packet is
* required before the ICAP will recognize commands.
@@ -251,10 +252,12 @@ static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
* hwicap_get_configuration_register - Query a configuration register.
* @drvdata: a pointer to the drvdata.
* @reg: a constant which represents the configuration
- * register value to be returned.
- * Examples: XHI_IDCODE, XHI_FLR.
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
* @reg_data: returns the value of the register.
*
+ * Returns: '0' on success and an error value on failure
+ *
* Sends a query packet to the ICAP and then receives the response.
* The icap is left in Synched state.
*/
@@ -320,7 +323,8 @@ static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
dev_dbg(drvdata->dev, "initializing\n");
/* Abort any current transaction, to make sure we have the
- * ICAP in a good state. */
+ * ICAP in a good state.
+ */
dev_dbg(drvdata->dev, "Reset...\n");
drvdata->config->reset(drvdata);
@@ -632,7 +636,6 @@ static int hwicap_setup(struct device *dev, int id,
drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
if (!drvdata) {
- dev_err(dev, "Couldn't allocate device private record\n");
retval = -ENOMEM;
goto failed0;
}
@@ -759,20 +762,20 @@ static int hwicap_of_probe(struct platform_device *op,
id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
regs);
@@ -802,20 +805,20 @@ static int hwicap_drv_probe(struct platform_device *pdev)
return -ENODEV;
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = pdev->dev.platform_data;
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&pdev->dev, pdev->id, res,
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index 38b145eaf24d..6b963d1c8ba3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -62,11 +62,13 @@ struct hwicap_drvdata {
struct hwicap_driver_config {
/* Read configuration data given by size into the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Write configuration data given by size from the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Get the status register, bit pattern given by:
@@ -193,11 +195,12 @@ struct config_registers {
* hwicap_type_1_read - Generates a Type 1 read packet header.
* @reg: is the address of the register to be read back.
*
+ * Return:
* Generates a Type 1 read packet header, which is used to indirectly
* read registers in the configuration logic. This packet must then
* be sent through the icap device, and a return packet received with
* the information.
- **/
+ */
static inline u32 hwicap_type_1_read(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
@@ -208,7 +211,9 @@ static inline u32 hwicap_type_1_read(u32 reg)
/**
* hwicap_type_1_write - Generates a Type 1 write packet header
* @reg: is the address of the register to be read back.
- **/
+ *
+ * Return: Type 1 write packet header
+ */
static inline u32 hwicap_type_1_write(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
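
For context, a Type 1 ICAP packet header is a bitfield: packet type in the top bits, then opcode, register address, and word count. A sketch with illustrative shift values — the authoritative XHI_* constants are defined in this header and may differ:

    #include <stdint.h>

    /* Illustrative values only; see the XHI_* defines for the real ones. */
    #define TYPE_SHIFT      29
    #define OP_SHIFT        27
    #define REG_SHIFT       13
    #define TYPE_1          1u
    #define OP_READ         1u

    static uint32_t type_1_read(uint32_t reg)
    {
            /* [31:29] type, [28:27] opcode, [26:13] register,
             * low bits = word count (read one word here). */
            return (TYPE_1 << TYPE_SHIFT) | (OP_READ << OP_SHIFT) |
                   (reg << REG_SHIFT) | 1;
    }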
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 6d50071f07d5..a7bca4207f44 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -150,4 +150,11 @@ config EXTCON_USB_GPIO
Say Y here to enable GPIO based USB cable detection extcon support.
Used typically if GPIO is used for USB ID pin detection.
+config EXTCON_USBC_CROS_EC
+ tristate "ChromeOS Embedded Controller EXTCON support"
+ depends on MFD_CROS_EC
+ help
+ Say Y here to enable USB Type-C cable detection extcon support when
+ using Chrome OS EC based USB Type-C ports.
+
endif
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index ecfa95804427..a73624e76193 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
+obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index 186fd735eb28..f599aeddf8e5 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -1,5 +1,5 @@
/*
- * drivers/extcon/devres.c - EXTCON device's resource management
+ * drivers/extcon/devres.c - EXTCON device's resource management
*
* Copyright (C) 2016 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -59,10 +59,9 @@ static void devm_extcon_dev_notifier_all_unreg(struct device *dev, void *res)
/**
* devm_extcon_dev_allocate - Allocate managed extcon device
- * @dev: device owning the extcon device being created
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
+ * @dev: the device owning the extcon device being created
+ * @supported_cable: the array of the supported external connectors
+ * ending with EXTCON_NONE.
*
* This function manages automatically the memory of extcon device using device
* resource management and simplify the control of freeing the memory of extcon
@@ -97,8 +96,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
/**
* devm_extcon_dev_free() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be freed
*
* Free the memory that is allocated with devm_extcon_dev_allocate()
* function.
@@ -112,10 +111,9 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
/**
* devm_extcon_dev_register() - Resource-managed extcon_dev_register()
- * @dev: device to allocate extcon device
- * @edev: the new extcon device to register
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be registered
*
- * Managed extcon_dev_register() function. If extcon device is attached with
+ * If an extcon device is registered with
* this function, that extcon device is automatically unregistered on driver
* detach. Internally this function calls extcon_dev_register() function.
* To get more information, refer that function.
@@ -149,8 +147,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
/**
* devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be unregistered
*
* Unregister extcon device that is registered with devm_extcon_dev_register()
* function.
@@ -164,10 +162,10 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
/**
* devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @id: the unique id among the extcon enumeration
+ * @nb: a notifier block to be registered
*
* This function manages automatically the notifier of extcon device using
* device resource management and simplify the control of unregistering
@@ -208,10 +206,10 @@ EXPORT_SYMBOL(devm_extcon_register_notifier);
/**
* devm_extcon_unregister_notifier()
- Resource-managed extcon_unregister_notifier()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @id: the unique id among the extcon enumeration
+ * @nb: a notifier block to be registered
*/
void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
@@ -225,9 +223,9 @@ EXPORT_SYMBOL(devm_extcon_unregister_notifier);
/**
* devm_extcon_register_notifier_all()
* - Resource-managed extcon_register_notifier_all()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*
* This function manages automatically the notifier of extcon device using
* device resource management and simplify the control of unregistering
@@ -263,9 +261,9 @@ EXPORT_SYMBOL(devm_extcon_register_notifier_all);
/**
* devm_extcon_unregister_notifier_all()
* - Resource-managed extcon_unregister_notifier_all()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*/
void devm_extcon_unregister_notifier_all(struct device *dev,
struct extcon_dev *edev,
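
Taken together, the devm_* helpers documented above let a provider's probe() drop all explicit extcon cleanup; the extcon-usbc-cros-ec driver added later in this series follows the same shape. A minimal sketch (the connector list and function names are illustrative):

    static const unsigned int my_cables[] = { EXTCON_USB, EXTCON_NONE };

    static int my_probe(struct platform_device *pdev)
    {
            struct extcon_dev *edev;
            int ret;

            edev = devm_extcon_dev_allocate(&pdev->dev, my_cables);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            ret = devm_extcon_dev_register(&pdev->dev, edev);
            if (ret)
                    return ret;

            /* No matching cleanup in remove(): both the allocation and
             * the registration are undone automatically on detach. */
            return 0;
    }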
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index d9f9afe45961..1a45e745717d 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -171,7 +171,7 @@ static int int3496_remove(struct platform_device *pdev)
return 0;
}
-static struct acpi_device_id int3496_acpi_match[] = {
+static const struct acpi_device_id int3496_acpi_match[] = {
{ "INT3496" },
{ }
};
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 62163468f205..7a5856809047 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -811,9 +811,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
*/
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
- if (!cable_attached)
- extcon_set_state_sync(info->edev,
- EXTCON_DISP_MHL, cable_attached);
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL,
+ cable_attached);
break;
}
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
new file mode 100644
index 000000000000..598956f1dcae
--- /dev/null
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -0,0 +1,417 @@
+/*
+ * drivers/extcon/extcon-usbc-cros-ec.c - ChromeOS Embedded Controller extcon
+ *
+ * Copyright (C) 2017 Google, Inc
+ * Author: Benson Leung <bleung@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+struct cros_ec_extcon_info {
+ struct device *dev;
+ struct extcon_dev *edev;
+
+ int port_id;
+
+ struct cros_ec_device *ec;
+
+ struct notifier_block notifier;
+
+ bool dp; /* DisplayPort enabled */
+ bool mux; /* SuperSpeed (usb3) enabled */
+ unsigned int power_type;
+};
+
+static const unsigned int usb_type_c_cable[] = {
+ EXTCON_DISP_DP,
+ EXTCON_NONE,
+};
+
+/**
+ * cros_ec_pd_command() - Send a command to the EC.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @command: EC command
+ * @version: EC command version
+ * @outdata: EC command output data
+ * @outsize: Size of outdata
+ * @indata: EC command input data
+ * @insize: Size of indata
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
+ unsigned int command,
+ unsigned int version,
+ void *outdata,
+ unsigned int outsize,
+ void *indata,
+ unsigned int insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(info->ec, msg);
+ if (ret >= 0 && insize)
+ memcpy(indata, msg->data, insize);
+
+ kfree(msg);
+ return ret;
+}
+
+/**
+ * cros_ec_usb_get_power_type() - Get power type info about the PD device
+ * attached to a given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: power type on success, <0 on failure.
+ */
+static int cros_ec_usb_get_power_type(struct cros_ec_extcon_info *info)
+{
+ struct ec_params_usb_pd_power_info req;
+ struct ec_response_usb_pd_power_info resp;
+ int ret;
+
+ req.port = info->port_id;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_POWER_INFO, 0,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.type;
+}
+
+/**
+ * cros_ec_usb_get_pd_mux_state() - Get PD mux state for given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: PD mux state on success, <0 on failure.
+ */
+static int cros_ec_usb_get_pd_mux_state(struct cros_ec_extcon_info *info)
+{
+ struct ec_params_usb_pd_mux_info req;
+ struct ec_response_usb_pd_mux_info resp;
+ int ret;
+
+ req.port = info->port_id;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_MUX_INFO, 0,
+ &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.flags;
+}
+
+/**
+ * cros_ec_usb_get_role() - Get role info about a possible PD device attached
+ * to a given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @polarity: pointer to cable polarity (return value)
+ *
+ * Return: role info on success, -ENOTCONN if no cable is connected, <0 on
+ * failure.
+ */
+static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
+ bool *polarity)
+{
+ struct ec_params_usb_pd_control pd_control;
+ struct ec_response_usb_pd_control_v1 resp;
+ int ret;
+
+ pd_control.port = info->port_id;
+ pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
+ pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
+ &pd_control, sizeof(pd_control),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (!(resp.enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ return -ENOTCONN;
+
+ *polarity = resp.polarity;
+
+ return resp.role;
+}
+
+/**
+ * cros_ec_pd_get_num_ports() - Get number of EC charge ports.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: number of ports on success, <0 on failure.
+ */
+static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
+{
+ struct ec_response_usb_pd_ports resp;
+ int ret;
+
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_PORTS,
+ 0, NULL, 0, &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.num_ports;
+}
+
+static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
+ bool force)
+{
+ struct device *dev = info->dev;
+ int role, power_type;
+ bool polarity = false;
+ bool dp = false;
+ bool mux = false;
+ bool hpd = false;
+
+ power_type = cros_ec_usb_get_power_type(info);
+ if (power_type < 0) {
+ dev_err(dev, "failed getting power type err = %d\n",
+ power_type);
+ return power_type;
+ }
+
+ role = cros_ec_usb_get_role(info, &polarity);
+ if (role < 0) {
+ if (role != -ENOTCONN) {
+ dev_err(dev, "failed getting role err = %d\n", role);
+ return role;
+ }
+ } else {
+ int pd_mux_state;
+
+ pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
+ if (pd_mux_state < 0)
+ pd_mux_state = USB_PD_MUX_USB_ENABLED;
+
+ dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
+ mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
+ hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
+ }
+
+ if (force || info->dp != dp || info->mux != mux ||
+ info->power_type != power_type) {
+
+ info->dp = dp;
+ info->mux = mux;
+ info->power_type = power_type;
+
+ extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD,
+ (union extcon_property_value)(int)hpd);
+
+ extcon_sync(info->edev, EXTCON_DISP_DP);
+
+ } else if (hpd) {
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD,
+ (union extcon_property_value)(int)hpd);
+ extcon_sync(info->edev, EXTCON_DISP_DP);
+ }
+
+ return 0;
+}
+
+static int extcon_cros_ec_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_extcon_info *info;
+ struct cros_ec_device *ec;
+ u32 host_event;
+
+ info = container_of(nb, struct cros_ec_extcon_info, notifier);
+ ec = info->ec;
+
+ host_event = cros_ec_get_host_event(ec);
+ if (host_event & (EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_USB_MUX))) {
+ extcon_cros_ec_detect_cable(info, false);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int extcon_cros_ec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_extcon_info *info;
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int numports, ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->ec = ec;
+
+ if (np) {
+ u32 port;
+
+ ret = of_property_read_u32(np, "google,usb-port-id", &port);
+ if (ret < 0) {
+ dev_err(dev, "Missing google,usb-port-id property\n");
+ return ret;
+ }
+ info->port_id = port;
+ } else {
+ info->port_id = pdev->id;
+ }
+
+ numports = cros_ec_pd_get_num_ports(info);
+ if (numports < 0) {
+ dev_err(dev, "failed getting number of ports! ret = %d\n",
+ numports);
+ return numports;
+ }
+
+ if (info->port_id >= numports) {
+ dev_err(dev, "This system only supports %d ports\n", numports);
+ return -ENODEV;
+ }
+
+ info->edev = devm_extcon_dev_allocate(dev, usb_type_c_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_extcon_dev_register(dev, info->edev);
+ if (ret < 0) {
+ dev_err(dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD);
+
+ platform_set_drvdata(pdev, info);
+
+ /* Get PD events from the EC */
+ info->notifier.notifier_call = extcon_cros_ec_event;
+ ret = blocking_notifier_chain_register(&info->ec->event_notifier,
+ &info->notifier);
+ if (ret < 0) {
+ dev_err(dev, "failed to register notifier\n");
+ return ret;
+ }
+
+ /* Perform initial detection */
+ ret = extcon_cros_ec_detect_cable(info, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to detect initial cable state\n");
+ goto unregister_notifier;
+ }
+
+ return 0;
+
+unregister_notifier:
+ blocking_notifier_chain_unregister(&info->ec->event_notifier,
+ &info->notifier);
+ return ret;
+}
+
+static int extcon_cros_ec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_extcon_info *info = platform_get_drvdata(pdev);
+
+ blocking_notifier_chain_unregister(&info->ec->event_notifier,
+ &info->notifier);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int extcon_cros_ec_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int extcon_cros_ec_resume(struct device *dev)
+{
+ int ret;
+ struct cros_ec_extcon_info *info = dev_get_drvdata(dev);
+
+ ret = extcon_cros_ec_detect_cable(info, true);
+ if (ret < 0)
+ dev_err(dev, "failed to detect cable state on resume\n");
+
+ return 0;
+}
+
+static const struct dev_pm_ops extcon_cros_ec_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(extcon_cros_ec_suspend, extcon_cros_ec_resume)
+};
+
+#define DEV_PM_OPS (&extcon_cros_ec_dev_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_OF
+static const struct of_device_id extcon_cros_ec_of_match[] = {
+ { .compatible = "google,extcon-usbc-cros-ec" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, extcon_cros_ec_of_match);
+#endif /* CONFIG_OF */
+
+static struct platform_driver extcon_cros_ec_driver = {
+ .driver = {
+ .name = "extcon-usbc-cros-ec",
+ .of_match_table = of_match_ptr(extcon_cros_ec_of_match),
+ .pm = DEV_PM_OPS,
+ },
+ .remove = extcon_cros_ec_remove,
+ .probe = extcon_cros_ec_probe,
+};
+
+module_platform_driver(extcon_cros_ec_driver);
+
+MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
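
On the consumer side, a display driver wired to this device through an "extcon" phandle can read back what extcon_cros_ec_detect_cable() publishes using the standard getters. A hedged sketch:

    static int query_dp_hpd(struct device *dev)
    {
            struct extcon_dev *edev;
            union extcon_property_value hpd;
            int ret;

            edev = extcon_get_edev_by_phandle(dev, 0);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            if (extcon_get_state(edev, EXTCON_DISP_DP) <= 0)
                    return 0;       /* DP alternate mode not active */

            ret = extcon_get_property(edev, EXTCON_DISP_DP,
                                      EXTCON_PROP_DISP_HPD, &hpd);
            if (ret < 0)
                    return ret;

            return hpd.intval;      /* HPD flag set in detect_cable() */
    }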
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 8eccf7b14937..35e9fb885486 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1,7 +1,5 @@
/*
- * drivers/extcon/extcon.c - External Connector (extcon) framework.
- *
- * External connector (extcon) class driver
+ * drivers/extcon/extcon.c - External Connector (extcon) framework.
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -37,7 +35,6 @@
#include "extcon.h"
#define SUPPORTED_CABLE_MAX 32
-#define CABLE_NAME_MAX 30
struct __extcon_info {
unsigned int type;
@@ -200,13 +197,13 @@ struct __extcon_info {
};
/**
- * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev: The extcon device
- * @cable_index: Index of this cable in the edev
- * @attr_g: Attribute group for the cable
+ * struct extcon_cable - An internal data for an external connector.
+ * @edev: the extcon device
+ * @cable_index: the index of this cable in the edev
+ * @attr_g: the attribute group for the cable
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
- * @attrs: Array pointing to attr_name and attr_state for attr_g
+ * @attrs: the array pointing to attr_name and attr_state for attr_g
*/
struct extcon_cable {
struct extcon_dev *edev;
@@ -234,15 +231,6 @@ static struct class *extcon_class;
static LIST_HEAD(extcon_dev_list);
static DEFINE_MUTEX(extcon_dev_list_lock);
-/**
- * check_mutually_exclusive - Check if new_state violates mutually_exclusive
- * condition.
- * @edev: the extcon device
- * @new_state: new cable attach status for @edev
- *
- * Returns 0 if nothing violates. Returns the index + 1 for the first
- * violated condition.
- */
static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
{
int i = 0;
@@ -417,11 +405,13 @@ static ssize_t cable_state_show(struct device *dev,
}
/**
- * extcon_sync() - Synchronize the states for both the attached/detached
- * @edev: the extcon device that has the cable.
+ * extcon_sync() - Synchronize the state for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ *
+ * Note that this function sends a notification in order to synchronize
+ * the state and property of an external connector.
*
- * This function send a notification to synchronize the all states of a
- * specific external connector
+ * Returns 0 if success or error number if fail.
*/
int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
@@ -497,9 +487,11 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
EXPORT_SYMBOL_GPL(extcon_sync);
/**
- * extcon_get_state() - Get the state of a external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector in extcon enumeration.
+ * extcon_get_state() - Get the state of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ *
+ * Returns 0 if success or error number if fail.
*/
int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
{
@@ -522,20 +514,19 @@ int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
EXPORT_SYMBOL_GPL(extcon_get_state);
/**
- * extcon_set_state() - Set the state of a external connector.
- * without a notification.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @state: the new cable status. The default semantics is
- * true: attached / false: detached.
+ * extcon_set_state() - Set the state of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @state: the new state of an external connector.
+ * the default semantics is true: attached / false: detached.
+ *
+ * Note that this function sets the state of an external connector without
+ * a notification. To synchronize the state of an external connector,
+ * use extcon_set_state_sync() and extcon_sync().
*
- * This function only set the state of a external connector without
- * a notification. To synchronize the data of a external connector,
- * use extcon_set_state_sync() and extcon_sync().
+ * Returns 0 if success or error number if fail.
*/
-int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+int extcon_set_state(struct extcon_dev *edev, unsigned int id, bool state)
{
unsigned long flags;
int index, ret = 0;
@@ -550,11 +541,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
spin_lock_irqsave(&edev->lock, flags);
/* Check whether the external connector's state is changed. */
- if (!is_extcon_changed(edev, index, cable_state))
+ if (!is_extcon_changed(edev, index, state))
goto out;
if (check_mutually_exclusive(edev,
- (edev->state & ~BIT(index)) | (cable_state & BIT(index)))) {
+ (edev->state & ~BIT(index)) | (state & BIT(index)))) {
ret = -EPERM;
goto out;
}
@@ -563,11 +554,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
* Initialize the value of extcon property before setting
* the detached state for an external connector.
*/
- if (!cable_state)
+ if (!state)
init_property(edev, id, index);
- /* Update the state for a external connector. */
- if (cable_state)
+ /* Update the state for an external connector. */
+ if (state)
edev->state |= BIT(index);
else
edev->state &= ~(BIT(index));
@@ -579,19 +570,18 @@ out:
EXPORT_SYMBOL_GPL(extcon_set_state);
/**
- * extcon_set_state_sync() - Set the state of a external connector
- * with a notification.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @state: the new cable status. The default semantics is
- * true: attached / false: detached.
+ * extcon_set_state_sync() - Set the state of an external connector with sync.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @state: the new state of an external connector.
+ * the default semantics is true: attached / false: detached.
+ *
+ * Note that this function sets the state of an external connector
+ * and synchronizes the state by sending a notification.
*
- * This function set the state of external connector and synchronize the data
- * by usning a notification.
+ * Returns 0 if success or error number if fail.
*/
-int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state)
{
int ret, index;
unsigned long flags;
@@ -602,12 +592,12 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
/* Check whether the external connector's state is changed. */
spin_lock_irqsave(&edev->lock, flags);
- ret = is_extcon_changed(edev, index, cable_state);
+ ret = is_extcon_changed(edev, index, state);
spin_unlock_irqrestore(&edev->lock, flags);
if (!ret)
return 0;
- ret = extcon_set_state(edev, id, cable_state);
+ ret = extcon_set_state(edev, id, state);
if (ret < 0)
return ret;
@@ -616,19 +606,18 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_state_sync);
/**
- * extcon_get_property() - Get the property value of a specific cable.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
- * @prop_val: the pointer which store the value of property.
+ * extcon_get_property() - Get the property value of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer which stores the value of the extcon property
*
- * When getting the property value of external connector, the external connector
- * should be attached. If detached state, function just return 0 without
- * property value. Also, the each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when getting the property value of an external connector,
+ * the external connector should be attached. If it is detached, the
+ * function returns 0 without a property value. Also, each property should
+ * be included in the list of supported properties according to extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
*/
int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -698,17 +687,16 @@ int extcon_get_property(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_get_property);
/**
- * extcon_set_property() - Set the property value of a specific cable.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
- * @prop_val: the pointer including the new value of property.
+ * extcon_set_property() - Set the property value of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer including the new value of extcon property
*
- * The each property should be included in the list of supported properties
- * according to the type of external connectors.
+ * Note that each property should be included in the list of supported
+ * properties according to the extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
*/
int extcon_set_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -766,15 +754,14 @@ int extcon_set_property(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property);
/**
- * extcon_set_property_sync() - Set the property value of a specific cable
- with a notification.
- * @prop_val: the pointer including the new value of property.
+ * extcon_set_property_sync() - Set property of an external connector with sync.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer including the new value of extcon property
*
- * When setting the property value of external connector, the external connector
- * should be attached. The each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when setting the property value of an external connector,
+ * the external connector should be attached. Each property should
+ * be included in the list of supported properties according to extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
*/
int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -791,12 +778,11 @@ int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property_sync);
/**
- * extcon_get_property_capability() - Get the capability of property
- * of an external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
+ * extcon_get_property_capability() - Get the capability of the property
+ * for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
*
* Returns 1 if the property is available or 0 if not available.
*/
@@ -822,18 +808,17 @@ int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_get_property_capability);
/**
- * extcon_set_property_capability() - Set the capability of a property
- * of an external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
+ * extcon_set_property_capability() - Set the capability of the property
+ * for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
*
- * This function set the capability of a property for an external connector
- * to mark the bit in capability bitmap which mean the available state of
- * a property.
+ * Note that this function sets the capability of the property
+ * for an external connector in order to mark the bit in the capability
+ * bitmap which means the property is available.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
*/
int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
unsigned int prop)
@@ -881,8 +866,10 @@ int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property_capability);
/**
- * extcon_get_extcon_dev() - Get the extcon device instance from the name
- * @extcon_name: The extcon name provided with extcon_dev_register()
+ * extcon_get_extcon_dev() - Get the extcon device instance from the name.
+ * @extcon_name: the extcon name provided with extcon_dev_register()
+ *
+ * Return the pointer of extcon device if success or ERR_PTR(err) if fail.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
@@ -904,15 +891,17 @@ out:
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
/**
- * extcon_register_notifier() - Register a notifiee to get notified by
- * any attach status changes from the extcon.
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * extcon_register_notifier() - Register a notifier block to get notified by
+ * any state changes from the extcon.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @nb: a notifier block to be registered
*
* Note that the second parameter given to the callback of nb (val) is
- * "old_state", not the current state. The current state can be retrieved
- * by looking at the third pameter (edev pointer)'s state value.
+ * the current state of an external connector and the third parameter
+ * is the pointer to the extcon device.
+ *
+ * Returns 0 if success or error number if fail.
*/
int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
@@ -936,10 +925,12 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_register_notifier);
/**
- * extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * extcon_unregister_notifier() - Unregister a notifier block from the extcon.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @nb: a notifier block to be registered
+ *
+ * Returns 0 if success or error number if fail.
*/
int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
@@ -963,16 +954,16 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
/**
- * extcon_register_notifier_all() - Register a notifier block for all connectors
- * @edev: the extcon device that has the external connector.
- * @nb: a notifier block to be registered.
+ * extcon_register_notifier_all() - Register a notifier block for all connectors.
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*
- * This function registers a notifier block in order to receive the state
- * change of all supported external connectors from extcon device.
+ * Note that this function registers a notifier block in order to receive
+ * the state change of all supported external connectors from the extcon device.
+ * And the second parameter given to the callback of nb (val) is
- * the current state and third parameter is the edev pointer.
+ * the current state and the third parameter is the pointer to the extcon device.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
*/
int extcon_register_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
@@ -993,10 +984,10 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier_all);
/**
* extcon_unregister_notifier_all() - Unregister a notifier block from extcon.
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
*/
int extcon_unregister_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
@@ -1045,15 +1036,14 @@ static void dummy_sysfs_dev_release(struct device *dev)
/*
* extcon_dev_allocate() - Allocate the memory of extcon device.
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
+ * @supported_cable: the array of the supported external connectors
+ * ending with EXTCON_NONE.
*
- * This function allocates the memory for extcon device without allocating
- * memory in each extcon provider driver and initialize default setting for
- * extcon device.
+ * Note that this function allocates the memory for an extcon device
+ * and initializes its default settings.
*
- * Return the pointer of extcon device if success or ERR_PTR(err) if fail
+ * Returns the pointer of the allocated extcon_dev if success
+ * or ERR_PTR(err) if fail.
*/
struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
{
@@ -1074,7 +1064,7 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
/*
* extcon_dev_free() - Free the memory of extcon device.
- * @edev: the extcon device to free
+ * @edev: the extcon device
*/
void extcon_dev_free(struct extcon_dev *edev)
{
@@ -1083,13 +1073,18 @@ void extcon_dev_free(struct extcon_dev *edev)
EXPORT_SYMBOL_GPL(extcon_dev_free);
/**
- * extcon_dev_register() - Register a new extcon device
- * @edev : the new extcon device (should be allocated before calling)
+ * extcon_dev_register() - Register a new extcon device
+ * @edev: the extcon device to be registered
*
* Among the members of edev struct, please set the "user initializing data"
- * in any case and set the "optional callbacks" if required. However, please
* do not set the values of "internal data", which are initialized by
* this function.
+ *
+ * Note that before calling this function, the caller has to allocate the
+ * memory for the extcon device by using extcon_dev_allocate(). And the
+ * extcon device should include the supported_cable information.
+ *
+ * Returns 0 if success or error number if fail.
*/
int extcon_dev_register(struct extcon_dev *edev)
{
@@ -1296,7 +1291,7 @@ EXPORT_SYMBOL_GPL(extcon_dev_register);
/**
* extcon_dev_unregister() - Unregister the extcon device.
- * @edev: the extcon device instance to be unregistered.
+ * @edev: the extcon device to be unregistered.
*
* Note that this does not call kfree(edev) because edev was not allocated
* by this class.
@@ -1342,11 +1337,11 @@ EXPORT_SYMBOL_GPL(extcon_dev_unregister);
#ifdef CONFIG_OF
/*
- * extcon_get_edev_by_phandle - Get the extcon device from devicetree
- * @dev - instance to the given device
- * @index - index into list of extcon_dev
+ * extcon_get_edev_by_phandle - Get the extcon device from devicetree.
+ * @dev : the instance of the given device
+ * @index : the index into the list of extcon_dev
*
- * return the instance of extcon device
+ * Returns the pointer of the extcon device if success or ERR_PTR(err) if fail.
*/
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
{
@@ -1363,8 +1358,8 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
node = of_parse_phandle(dev->of_node, "extcon", index);
if (!node) {
- dev_dbg(dev, "failed to get phandle in %s node\n",
- dev->of_node->full_name);
+ dev_dbg(dev, "failed to get phandle in %pOF node\n",
+ dev->of_node);
return ERR_PTR(-ENODEV);
}
@@ -1411,8 +1406,6 @@ static void __exit extcon_class_exit(void)
module_exit(extcon_class_exit);
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_DESCRIPTION("External connector (extcon) class driver");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("External Connector (extcon) framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 78945729388e..35e553b3b190 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -202,7 +202,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
if (!sec->raw_name) {
err = -ENOMEM;
- goto err_iounmap;
+ goto err_memunmap;
}
sysfs_bin_attr_init(&sec->bin_attr);
@@ -233,8 +233,8 @@ err_sysfs_remove:
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
err_free_raw_name:
kfree(sec->raw_name);
-err_iounmap:
- iounmap(sec->baseaddr);
+err_memunmap:
+ memunmap(sec->baseaddr);
return err;
}
@@ -245,7 +245,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
kobject_put(sec->kobj);
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
- iounmap(sec->baseaddr);
+ memunmap(sec->baseaddr);
}
return 0;
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
return -ENOMEM;
memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
- iounmap(temp);
+ memunmap(temp);
if (header.magic != VPD_CBMEM_MAGIC)
return -ENODEV;
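The vpd.c hunks above fix mismatched unmap calls: these regions are mapped
with memremap(), whose counterpart is memunmap(), while iounmap() pairs only
with ioremap(). A minimal sketch of the correct pairing (physaddr and len are
placeholders):

#include <linux/io.h>

static int my_map_example(phys_addr_t physaddr, size_t len)
{
	void *base = memremap(physaddr, len, MEMREMAP_WB);

	if (!base)
		return -ENOMEM;
	/* ... access base as ordinary memory ... */
	memunmap(base);		/* not iounmap(), as in the fix above */
	return 0;
}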
diff --git a/drivers/fmc/Makefile b/drivers/fmc/Makefile
index b9452919739f..e809322e1bac 100644
--- a/drivers/fmc/Makefile
+++ b/drivers/fmc/Makefile
@@ -6,6 +6,7 @@ fmc-y += fmc-match.o
fmc-y += fmc-sdb.o
fmc-y += fru-parse.o
fmc-y += fmc-dump.o
+fmc-y += fmc-debug.o
obj-$(CONFIG_FMC_FAKEDEV) += fmc-fakedev.o
obj-$(CONFIG_FMC_TRIVIAL) += fmc-trivial.o
diff --git a/drivers/fmc/fmc-chardev.c b/drivers/fmc/fmc-chardev.c
index ace6ef24d15e..5ecf4090a610 100644
--- a/drivers/fmc/fmc-chardev.c
+++ b/drivers/fmc/fmc-chardev.c
@@ -129,8 +129,7 @@ static int fc_probe(struct fmc_device *fmc)
struct fc_instance *fc;
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &fc_drv);
+ index = fmc_validate(fmc, &fc_drv);
if (index < 0)
return -EINVAL; /* not our device: invalid */
diff --git a/drivers/fmc/fmc-core.c b/drivers/fmc/fmc-core.c
index 353fc546fb08..cec3b8db0d69 100644
--- a/drivers/fmc/fmc-core.c
+++ b/drivers/fmc/fmc-core.c
@@ -13,6 +13,9 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fmc.h>
+#include <linux/fmc-sdb.h>
+
+#include "fmc-private.h"
static int fmc_check_version(unsigned long version, const char *name)
{
@@ -118,6 +121,61 @@ static struct bin_attribute fmc_eeprom_attr = {
.write = fmc_write_eeprom,
};
+int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
+ char *name, int flags)
+{
+ if (fmc->op->irq_request)
+ return fmc->op->irq_request(fmc, h, name, flags);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_irq_request);
+
+void fmc_irq_free(struct fmc_device *fmc)
+{
+ if (fmc->op->irq_free)
+ fmc->op->irq_free(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_free);
+
+void fmc_irq_ack(struct fmc_device *fmc)
+{
+ if (likely(fmc->op->irq_ack))
+ fmc->op->irq_ack(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_ack);
+
+int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv)
+{
+ if (fmc->op->validate)
+ return fmc->op->validate(fmc, drv);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_validate);
+
+int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, int ngpio)
+{
+ if (fmc->op->gpio_config)
+ return fmc->op->gpio_config(fmc, gpio, ngpio);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_gpio_config);
+
+int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l)
+{
+ if (fmc->op->read_ee)
+ return fmc->op->read_ee(fmc, pos, d, l);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_read_ee);
+
+int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l)
+{
+ if (fmc->op->write_ee)
+ return fmc->op->write_ee(fmc, pos, d, l);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_write_ee);
+
/*
* Functions for client modules follow
*/
@@ -141,7 +199,8 @@ EXPORT_SYMBOL(fmc_driver_unregister);
* When a device set is registered, all eeproms must be read
* and all FRUs must be parsed
*/
-int fmc_device_register_n(struct fmc_device **devs, int n)
+int fmc_device_register_n_gw(struct fmc_device **devs, int n,
+ struct fmc_gateware *gw)
{
struct fmc_device *fmc, **devarray;
uint32_t device_id;
@@ -221,6 +280,21 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
else
dev_set_name(&fmc->dev, "%s-%04x", fmc->mezzanine_name,
device_id);
+
+ if (gw) {
+ /*
+ * The carrier already know the bitstream to load
+ * for this set of FMC mezzanines.
+ */
+ ret = fmc->op->reprogram_raw(fmc, NULL,
+ gw->bitstream, gw->len);
+ if (ret) {
+ dev_warn(fmc->hwdev,
+ "Invalid gateware for FMC mezzanine\n");
+ goto out;
+ }
+ }
+
ret = device_add(&fmc->dev);
if (ret < 0) {
dev_err(fmc->hwdev, "Slot %i: Failed in registering "
@@ -234,18 +308,16 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
}
/* This device went well, give information to the user */
fmc_dump_eeprom(fmc);
- fmc_dump_sdb(fmc);
+ fmc_debug_init(fmc);
}
return 0;
out1:
device_del(&fmc->dev);
out:
- fmc_free_id_info(fmc);
- put_device(&fmc->dev);
-
kfree(devarray);
for (i--; i >= 0; i--) {
+ fmc_debug_exit(devs[i]);
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
@@ -254,8 +326,20 @@ out:
return ret;
}
+EXPORT_SYMBOL(fmc_device_register_n_gw);
+
+int fmc_device_register_n(struct fmc_device **devs, int n)
+{
+ return fmc_device_register_n_gw(devs, n, NULL);
+}
EXPORT_SYMBOL(fmc_device_register_n);
+int fmc_device_register_gw(struct fmc_device *fmc, struct fmc_gateware *gw)
+{
+ return fmc_device_register_n_gw(&fmc, 1, gw);
+}
+EXPORT_SYMBOL(fmc_device_register_gw);
+
int fmc_device_register(struct fmc_device *fmc)
{
return fmc_device_register_n(&fmc, 1);
@@ -273,6 +357,7 @@ void fmc_device_unregister_n(struct fmc_device **devs, int n)
kfree(devs[0]->devarray);
for (i = 0; i < n; i++) {
+ fmc_debug_exit(devs[i]);
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
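A hedged sketch of how a carrier driver might use the gateware-aware
registration helper added above (the my_* names are hypothetical):

#include <linux/fmc.h>

static int my_carrier_register(struct fmc_device *my_fmc,
			       void *my_image, unsigned long my_image_len)
{
	struct fmc_gateware gw = {
		.bitstream = my_image,
		.len	   = my_image_len,
	};

	/* programs the bitstream via op->reprogram_raw before device_add() */
	return fmc_device_register_gw(my_fmc, &gw);
}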
diff --git a/drivers/fmc/fmc-debug.c b/drivers/fmc/fmc-debug.c
new file mode 100644
index 000000000000..32930722770c
--- /dev/null
+++ b/drivers/fmc/fmc-debug.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/byteorder.h>
+
+#include <linux/fmc.h>
+#include <linux/sdb.h>
+#include <linux/fmc-sdb.h>
+
+#define FMC_DBG_SDB_DUMP "dump_sdb"
+
+static char *__strip_trailing_space(char *buf, char *str, int len)
+{
+ int i = len - 1;
+
+ memcpy(buf, str, len);
+ buf[len] = '\0';
+ while (i >= 0 && buf[i] == ' ')
+ buf[i--] = '\0';
+ return buf;
+}
+
+#define __sdb_string(buf, field) ({ \
+ BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
+ __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
+ })
+
+/**
+ * We do not check seq_printf() errors because we want to see things in any case
+ */
+static void fmc_sdb_dump_recursive(struct fmc_device *fmc, struct seq_file *s,
+ const struct sdb_array *arr)
+{
+ unsigned long base = arr->baseaddr;
+ int i, j, n = arr->len, level = arr->level;
+ char tmp[64];
+
+ for (i = 0; i < n; i++) {
+ union sdb_record *r;
+ struct sdb_product *p;
+ struct sdb_component *c;
+
+ r = &arr->record[i];
+ c = &r->dev.sdb_component;
+ p = &c->product;
+
+ for (j = 0; j < level; j++)
+ seq_printf(s, " ");
+ switch (r->empty.record_type) {
+ case sdb_type_interconnect:
+ seq_printf(s, "%08llx:%08x %.19s\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name);
+ break;
+ case sdb_type_device:
+ seq_printf(s, "%08llx:%08x %.19s (%08llx-%08llx)\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name,
+ __be64_to_cpu(c->addr_first) + base,
+ __be64_to_cpu(c->addr_last) + base);
+ break;
+ case sdb_type_bridge:
+ seq_printf(s, "%08llx:%08x %.19s (bridge: %08llx)\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name,
+ __be64_to_cpu(c->addr_first) + base);
+ if (IS_ERR(arr->subtree[i])) {
+ seq_printf(s, "SDB: (bridge error %li)\n",
+ PTR_ERR(arr->subtree[i]));
+ break;
+ }
+ fmc_sdb_dump_recursive(fmc, s, arr->subtree[i]);
+ break;
+ case sdb_type_integration:
+ seq_printf(s, "integration\n");
+ break;
+ case sdb_type_repo_url:
+ seq_printf(s, "Synthesis repository: %s\n",
+ __sdb_string(tmp, r->repo_url.repo_url));
+ break;
+ case sdb_type_synthesis:
+ seq_printf(s, "Bitstream '%s' ",
+ __sdb_string(tmp, r->synthesis.syn_name));
+ seq_printf(s, "synthesized %08x by %s ",
+ __be32_to_cpu(r->synthesis.date),
+ __sdb_string(tmp, r->synthesis.user_name));
+ seq_printf(s, "(%s version %x), ",
+ __sdb_string(tmp, r->synthesis.tool_name),
+ __be32_to_cpu(r->synthesis.tool_version));
+ seq_printf(s, "commit %pm\n",
+ r->synthesis.commit_id);
+ break;
+ case sdb_type_empty:
+ seq_printf(s, "empty\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN TYPE 0x%02x\n",
+ r->empty.record_type);
+ break;
+ }
+ }
+}
+
+static int fmc_sdb_dump(struct seq_file *s, void *offset)
+{
+ struct fmc_device *fmc = s->private;
+
+ if (!fmc->sdb) {
+ seq_printf(s, "no SDB information\n");
+ return 0;
+ }
+
+ seq_printf(s, "FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
+ fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
+ /* Dump SDB information */
+ fmc_sdb_dump_recursive(fmc, s, fmc->sdb);
+
+ return 0;
+}
+
+
+static int fmc_sdb_dump_open(struct inode *inode, struct file *file)
+{
+ struct fmc_device *fmc = inode->i_private;
+
+ return single_open(file, fmc_sdb_dump, fmc);
+}
+
+
+const struct file_operations fmc_dbgfs_sdb_dump = {
+ .owner = THIS_MODULE,
+ .open = fmc_sdb_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int fmc_debug_init(struct fmc_device *fmc)
+{
+ fmc->dbg_dir = debugfs_create_dir(dev_name(&fmc->dev), NULL);
+ if (IS_ERR_OR_NULL(fmc->dbg_dir)) {
+ pr_err("FMC: Cannot create debugfs\n");
+ return PTR_ERR(fmc->dbg_dir);
+ }
+
+ fmc->dbg_sdb_dump = debugfs_create_file(FMC_DBG_SDB_DUMP, 0444,
+ fmc->dbg_dir, fmc,
+ &fmc_dbgfs_sdb_dump);
+ if (IS_ERR_OR_NULL(fmc->dbg_sdb_dump))
+ pr_err("FMC: Cannot create debugfs file %s\n",
+ FMC_DBG_SDB_DUMP);
+
+ return 0;
+}
+
+void fmc_debug_exit(struct fmc_device *fmc)
+{
+ if (fmc->dbg_dir)
+ debugfs_remove_recursive(fmc->dbg_dir);
+}
diff --git a/drivers/fmc/fmc-dump.c b/drivers/fmc/fmc-dump.c
index c91afd6388f6..cd1df475b254 100644
--- a/drivers/fmc/fmc-dump.c
+++ b/drivers/fmc/fmc-dump.c
@@ -15,8 +15,6 @@
static int fmc_must_dump_eeprom;
module_param_named(dump_eeprom, fmc_must_dump_eeprom, int, 0644);
-static int fmc_must_dump_sdb;
-module_param_named(dump_sdb, fmc_must_dump_sdb, int, 0644);
#define LINELEN 16
@@ -59,42 +57,3 @@ void fmc_dump_eeprom(const struct fmc_device *fmc)
for (i = 0; i < fmc->eeprom_len; i += LINELEN, line += LINELEN)
prev = dump_line(i, line, prev);
}
-
-void fmc_dump_sdb(const struct fmc_device *fmc)
-{
- const uint8_t *line, *prev;
- int i, len;
-
- if (!fmc->sdb)
- return;
- if (!fmc_must_dump_sdb)
- return;
-
- /* If the argument is not-zero, do simple dump (== show) */
- if (fmc_must_dump_sdb > 0)
- fmc_show_sdb_tree(fmc);
-
- if (fmc_must_dump_sdb == 1)
- return;
-
- /* If bigger than 1, dump it seriously, to help debugging */
-
- /*
- * Here we should really use libsdbfs (which is designed to
- * work in kernel space as well) , but it doesn't support
- * directories yet, and it requires better intergration (it
- * should be used instead of fmc-specific code).
- *
- * So, lazily, just dump the top-level array
- */
- pr_info("FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
- fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
- pr_info("FMC: poor dump of sdb first level:\n");
-
- len = fmc->sdb->len * sizeof(union sdb_record);
- line = (void *)fmc->sdb->record;
- prev = NULL;
- for (i = 0; i < len; i += LINELEN, line += LINELEN)
- prev = dump_line(i, line, prev);
- return;
-}
diff --git a/drivers/fmc/fmc-match.c b/drivers/fmc/fmc-match.c
index 104a5efc2207..a0956d1f7550 100644
--- a/drivers/fmc/fmc-match.c
+++ b/drivers/fmc/fmc-match.c
@@ -63,7 +63,7 @@ int fmc_fill_id_info(struct fmc_device *fmc)
if (!fmc->eeprom)
return -ENOMEM;
allocated = 1;
- ret = fmc->op->read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
+ ret = fmc_read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
if (ret < 0)
goto out;
}
diff --git a/drivers/fmc/fmc-private.h b/drivers/fmc/fmc-private.h
new file mode 100644
index 000000000000..1e5136643bdc
--- /dev/null
+++ b/drivers/fmc/fmc-private.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+extern int fmc_debug_init(struct fmc_device *fmc);
+extern void fmc_debug_exit(struct fmc_device *fmc);
diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c
index 4603fdb74465..ffdc1762b580 100644
--- a/drivers/fmc/fmc-sdb.c
+++ b/drivers/fmc/fmc-sdb.c
@@ -127,12 +127,12 @@ int fmc_free_sdb_tree(struct fmc_device *fmc)
EXPORT_SYMBOL(fmc_free_sdb_tree);
/* This helper calls reprogram and inizialized sdb as well */
-int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
- int sdb_entry)
+int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
+ void *gw, unsigned long len, int sdb_entry)
{
int ret;
- ret = fmc->op->reprogram(fmc, d, gw);
+ ret = fmc->op->reprogram_raw(fmc, d, gw, len);
if (ret < 0)
return ret;
if (sdb_entry < 0)
@@ -145,108 +145,39 @@ int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
sdb_entry);
return -ENODEV;
}
- fmc_dump_sdb(fmc);
- return 0;
-}
-EXPORT_SYMBOL(fmc_reprogram);
-
-static char *__strip_trailing_space(char *buf, char *str, int len)
-{
- int i = len - 1;
- memcpy(buf, str, len);
- while(i >= 0 && buf[i] == ' ')
- buf[i--] = '\0';
- return buf;
+ return 0;
}
+EXPORT_SYMBOL(fmc_reprogram_raw);
-#define __sdb_string(buf, field) ({ \
- BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
- __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
- })
-
-static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
- const struct sdb_array *arr)
+/* This helper calls reprogram and initializes the SDB as well */
+int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
+ int sdb_entry)
{
- unsigned long base = arr->baseaddr;
- int i, j, n = arr->len, level = arr->level;
- char buf[64];
-
- for (i = 0; i < n; i++) {
- union sdb_record *r;
- struct sdb_product *p;
- struct sdb_component *c;
- r = &arr->record[i];
- c = &r->dev.sdb_component;
- p = &c->product;
+ int ret;
- dev_info(&fmc->dev, "SDB: ");
+ ret = fmc->op->reprogram(fmc, d, gw);
+ if (ret < 0)
+ return ret;
+ if (sdb_entry < 0)
+ return ret;
- for (j = 0; j < level; j++)
- printk(KERN_CONT " ");
- switch (r->empty.record_type) {
- case sdb_type_interconnect:
- printk(KERN_CONT "%08llx:%08x %.19s\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name);
- break;
- case sdb_type_device:
- printk(KERN_CONT "%08llx:%08x %.19s (%08llx-%08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base,
- __be64_to_cpu(c->addr_last) + base);
- break;
- case sdb_type_bridge:
- printk(KERN_CONT "%08llx:%08x %.19s (bridge: %08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base);
- if (IS_ERR(arr->subtree[i])) {
- dev_info(&fmc->dev, "SDB: (bridge error %li)\n",
- PTR_ERR(arr->subtree[i]));
- break;
- }
- __fmc_show_sdb_tree(fmc, arr->subtree[i]);
- break;
- case sdb_type_integration:
- printk(KERN_CONT "integration\n");
- break;
- case sdb_type_repo_url:
- printk(KERN_CONT "Synthesis repository: %s\n",
- __sdb_string(buf, r->repo_url.repo_url));
- break;
- case sdb_type_synthesis:
- printk(KERN_CONT "Bitstream '%s' ",
- __sdb_string(buf, r->synthesis.syn_name));
- printk(KERN_CONT "synthesized %08x by %s ",
- __be32_to_cpu(r->synthesis.date),
- __sdb_string(buf, r->synthesis.user_name));
- printk(KERN_CONT "(%s version %x), ",
- __sdb_string(buf, r->synthesis.tool_name),
- __be32_to_cpu(r->synthesis.tool_version));
- printk(KERN_CONT "commit %pm\n",
- r->synthesis.commit_id);
- break;
- case sdb_type_empty:
- printk(KERN_CONT "empty\n");
- break;
- default:
- printk(KERN_CONT "UNKNOWN TYPE 0x%02x\n",
- r->empty.record_type);
- break;
- }
+ /* We are required to find SDB at a given offset */
+ ret = fmc_scan_sdb_tree(fmc, sdb_entry);
+ if (ret < 0) {
+ dev_err(&fmc->dev, "Can't find SDB at address 0x%x\n",
+ sdb_entry);
+ return -ENODEV;
}
+
+ return 0;
}
+EXPORT_SYMBOL(fmc_reprogram);
void fmc_show_sdb_tree(const struct fmc_device *fmc)
{
- if (!fmc->sdb)
- return;
- __fmc_show_sdb_tree(fmc, fmc->sdb);
+ pr_err("%s: not supported anymore, use debugfs to dump SDB\n",
+ __func__);
}
EXPORT_SYMBOL(fmc_show_sdb_tree);
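A short sketch of the new raw-reprogram entry point introduced above
(image and image_len are placeholders; passing sdb_entry < 0 skips the
SDB scan):

static int my_load_and_scan(struct fmc_device *fmc, struct fmc_driver *drv,
			    void *image, unsigned long image_len)
{
	/* require an SDB tree at offset 0 after programming */
	return fmc_reprogram_raw(fmc, drv, image, image_len, 0);
}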
diff --git a/drivers/fmc/fmc-trivial.c b/drivers/fmc/fmc-trivial.c
index 6c590f54c79d..8defdee3e3a3 100644
--- a/drivers/fmc/fmc-trivial.c
+++ b/drivers/fmc/fmc-trivial.c
@@ -24,7 +24,7 @@ static irqreturn_t t_handler(int irq, void *dev_id)
{
struct fmc_device *fmc = dev_id;
- fmc->op->irq_ack(fmc);
+ fmc_irq_ack(fmc);
dev_info(&fmc->dev, "received irq %i\n", irq);
return IRQ_HANDLED;
}
@@ -46,25 +46,21 @@ static int t_probe(struct fmc_device *fmc)
int ret;
int index = 0;
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &t_drv);
+ index = fmc_validate(fmc, &t_drv);
if (index < 0)
return -EINVAL; /* not our device: invalid */
- ret = fmc->op->irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
+ ret = fmc_irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
if (ret < 0)
return ret;
/* ignore error code of call below, we really don't care */
- fmc->op->gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
+ fmc_gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
- /* Reprogram, if asked to. ESRCH == no filename specified */
- ret = -ESRCH;
- if (fmc->op->reprogram)
- ret = fmc->op->reprogram(fmc, &t_drv, "");
- if (ret == -ESRCH)
+ ret = fmc_reprogram(fmc, &t_drv, "", 0);
+ if (ret == -EPERM) /* programming not supported */
ret = 0;
if (ret < 0)
- fmc->op->irq_free(fmc);
+ fmc_irq_free(fmc);
/* FIXME: reprogram LM32 too */
return ret;
@@ -72,7 +68,7 @@ static int t_probe(struct fmc_device *fmc)
static int t_remove(struct fmc_device *fmc)
{
- fmc->op->irq_free(fmc);
+ fmc_irq_free(fmc);
return 0;
}
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index 9bb2cbd5c9f2..3eb81bb1f1fc 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -50,7 +50,7 @@ static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
if (write) {
dev_info(&fmc->dev, "write %i bytes at 0x%04x\n",
thislen, thisaddr);
- err = fmc->op->write_ee(fmc, thisaddr, p + 5, thislen);
+ err = fmc_write_ee(fmc, thisaddr, p + 5, thislen);
}
if (err < 0) {
dev_err(&fmc->dev, "write failure @0x%04x\n",
@@ -70,7 +70,7 @@ static int fwe_run_bin(struct fmc_device *fmc, const struct firmware *fw)
int ret;
dev_info(&fmc->dev, "programming %zi bytes\n", fw->size);
- ret = fmc->op->write_ee(fmc, 0, (void *)fw->data, fw->size);
+ ret = fmc_write_ee(fmc, 0, (void *)fw->data, fw->size);
if (ret < 0) {
dev_info(&fmc->dev, "write_eeprom: error %i\n", ret);
return ret;
@@ -115,8 +115,8 @@ static int fwe_probe(struct fmc_device *fmc)
KBUILD_MODNAME);
return -ENODEV;
}
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &fwe_drv);
+
+ index = fmc_validate(fmc, &fwe_drv);
if (index < 0) {
pr_err("%s: refusing device \"%s\"\n", KBUILD_MODNAME,
dev_name(dev));
diff --git a/drivers/fmc/fru-parse.c b/drivers/fmc/fru-parse.c
index cb46263c5da2..eb21480d399f 100644
--- a/drivers/fmc/fru-parse.c
+++ b/drivers/fmc/fru-parse.c
@@ -31,12 +31,11 @@ static char *__fru_alloc_get_tl(struct fru_common_header *header, int nr)
{
struct fru_type_length *tl;
char *res;
- int len;
tl = __fru_get_board_tl(header, nr);
if (!tl)
return NULL;
- len = fru_strlen(tl);
+
res = fru_alloc(fru_strlen(tl) + 1);
if (!res)
return NULL;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 161ba9dccede..ad5448f718b3 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -2,9 +2,7 @@
# FPGA framework configuration
#
-menu "FPGA Configuration Support"
-
-config FPGA
+menuconfig FPGA
tristate "FPGA Configuration Framework"
help
Say Y here if you want support for configuring FPGAs from the
@@ -26,6 +24,20 @@ config FPGA_MGR_ICE40_SPI
help
FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+config FPGA_MGR_ALTERA_CVP
+ tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+ depends on PCI
+ help
+ FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
+ and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+
+config FPGA_MGR_ALTERA_PS_SPI
+ tristate "Altera FPGA Passive Serial over SPI"
+ depends on SPI
+ help
+ FPGA manager driver support for Altera Arria/Cyclone/Stratix
+ using the passive serial interface over SPI.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
depends on ARCH_SOCFPGA || COMPILE_TEST
@@ -106,5 +118,3 @@ config XILINX_PR_DECOUPLER
being reprogrammed during partial reconfig.
endif # FPGA
-
-endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 2a4f0218145c..e09895f0525b 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_FPGA) += fpga-mgr.o
# FPGA Manager Drivers
+obj-$(CONFIG_FPGA_MGR_ALTERA_CVP) += altera-cvp.o
+obj-$(CONFIG_FPGA_MGR_ALTERA_PS_SPI) += altera-ps-spi.o
obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
new file mode 100644
index 000000000000..08629ee69d11
--- /dev/null
+++ b/drivers/fpga/altera-cvp.c
@@ -0,0 +1,500 @@
+/*
+ * FPGA Manager Driver for Altera Arria/Cyclone/Stratix CvP
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Manage Altera FPGA firmware using PCIe CvP.
+ * Firmware must be in binary "rbf" format.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#define CVP_BAR 0 /* BAR used for data transfer in memory mode */
+#define CVP_DUMMY_WR 244 /* dummy writes to clear CvP state machine */
+#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
+
+/* Vendor Specific Extended Capability Registers */
+#define VSE_PCIE_EXT_CAP_ID 0x200
+#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
+
+#define VSE_CVP_STATUS 0x21c /* 32bit */
+#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
+#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
+#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */
+#define VSE_CVP_STATUS_USERMODE BIT(21) /* USERMODE */
+#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */
+#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */
+
+#define VSE_CVP_MODE_CTRL 0x220 /* 32bit */
+#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */
+#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8)
+
+#define VSE_CVP_DATA 0x228 /* 32bit */
+#define VSE_CVP_PROG_CTRL 0x22c /* 32bit */
+#define VSE_CVP_PROG_CTRL_CONFIG BIT(0)
+#define VSE_CVP_PROG_CTRL_START_XFER BIT(1)
+
+#define VSE_UNCOR_ERR_STATUS 0x234 /* 32bit */
+#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */
+
+#define DRV_NAME "altera-cvp"
+#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager"
+
+/* Optional CvP config error status check for debugging */
+static bool altera_cvp_chkcfg;
+
+struct altera_cvp_conf {
+ struct fpga_manager *mgr;
+ struct pci_dev *pci_dev;
+ void __iomem *map;
+ void (*write_data)(struct altera_cvp_conf *, u32);
+ char mgr_name[64];
+ u8 numclks;
+};
+
+static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 status;
+
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status);
+
+ if (status & VSE_CVP_STATUS_CFG_DONE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (status & VSE_CVP_STATUS_CVP_EN)
+ return FPGA_MGR_STATE_POWER_UP;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
+{
+ writel(val, conf->map);
+}
+
+static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
+{
+ pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
+}
+
+/* switches between CvP clock and internal clock */
+static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
+{
+ unsigned int i;
+ u32 val;
+
+ /* set 1 CVP clock cycle for every CVP Data Register Write */
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val);
+
+ for (i = 0; i < CVP_DUMMY_WR; i++)
+ conf->write_data(conf, 0); /* dummy data, could be any value */
+}
+
+static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
+ u32 status_val, int timeout_us)
+{
+ unsigned int retries;
+ u32 val;
+
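+ /* i.e. retries = DIV_ROUND_UP(timeout_us, 10): one poll per 10 us slice */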
+ retries = timeout_us / 10;
+ if (timeout_us % 10)
+ retries++;
+
+ do {
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ if ((val & status_mask) == status_val)
+ return 0;
+
+ /* use small usleep value to re-check and break early */
+ usleep_range(10, 11);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int altera_cvp_teardown(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ int ret;
+ u32 val;
+
+ /* STEP 12 - reset START_XFER bit */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ val &= ~VSE_CVP_PROG_CTRL_START_XFER;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 13 - reset CVP_CONFIG bit */
+ val &= ~VSE_CVP_PROG_CTRL_CONFIG;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /*
+ * STEP 14
+ * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
+ * writes to the HIP
+ */
+ altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */
+
+ /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10);
+ if (ret)
+ dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
+
+ return ret;
+}
+
+static int altera_cvp_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ u32 iflags, val;
+ int ret;
+
+ iflags = info ? info->flags : 0;
+
+ if (iflags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Determine allowed clock to data ratio */
+ if (iflags & FPGA_MGR_COMPRESSED_BITSTREAM)
+ conf->numclks = 8; /* ratio for all compressed images */
+ else if (iflags & FPGA_MGR_ENCRYPTED_BITSTREAM)
+ conf->numclks = 4; /* for uncompressed and encrypted images */
+ else
+ conf->numclks = 1; /* for uncompressed and unencrypted images */
+
+ /* STEP 1 - read CVP status and check CVP_EN flag */
+ pci_read_config_dword(pdev, VSE_CVP_STATUS, &val);
+ if (!(val & VSE_CVP_STATUS_CVP_EN)) {
+ dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
+ return -ENODEV;
+ }
+
+ if (val & VSE_CVP_STATUS_CFG_RDY) {
+ dev_warn(&mgr->dev, "CvP already started, teardown first\n");
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * STEP 2
+ * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
+ */
+ /* switch from fabric to PMA clock */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /* set CVP mode */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_CVP_MODE;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /*
+ * STEP 3
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ altera_cvp_dummy_write(conf);
+
+ /* STEP 4 - set CVP_CONFIG bit */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ /* request control block to begin transfer using CVP */
+ val |= VSE_CVP_PROG_CTRL_CONFIG;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
+ VSE_CVP_STATUS_CFG_RDY, 10);
+ if (ret) {
+ dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+ return ret;
+ }
+
+ /*
+ * STEP 6
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ altera_cvp_dummy_write(conf);
+
+ /* STEP 7 - set START_XFER */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ val |= VSE_CVP_PROG_CTRL_START_XFER;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ return 0;
+}
+
+static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 val;
+
+ /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ if (val & VSE_CVP_STATUS_CFG_ERR) {
+ dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+ bytes);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ const u32 *data;
+ size_t done, remaining;
+ int status = 0;
+ u32 mask;
+
+ /* STEP 9 - write 32-bit data from RBF file to CVP data register */
+ data = (u32 *)buf;
+ remaining = count;
+ done = 0;
+
+ while (remaining >= 4) {
+ conf->write_data(conf, *data++);
+ done += 4;
+ remaining -= 4;
+
+ /*
+ * STEP 10 (optional) and STEP 11
+ * - check error flag
+ * - loop until data transfer completed
+ * Config images can be huge (more than 40 MiB), so
+ * only check after a new 4k data block has been written.
+ * This reduces the number of checks and speeds up the
+ * configuration process.
+ */
+ if (altera_cvp_chkcfg && !(done % SZ_4K)) {
+ status = altera_cvp_chk_error(mgr, done);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ /* write up to 3 trailing bytes, if any */
+ mask = BIT(remaining * 8) - 1;
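+ /* e.g. remaining == 3 -> mask == 0x00ffffff; remaining == 0 -> mask == 0, nothing to write */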
+ if (mask)
+ conf->write_data(conf, *data & mask);
+
+ if (altera_cvp_chkcfg)
+ status = altera_cvp_chk_error(mgr, count);
+
+ return status;
+}
+
+static int altera_cvp_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ int ret;
+ u32 mask;
+ u32 val;
+
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+
+ /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
+ pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val);
+ if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
+ dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
+ return -EPROTO;
+ }
+
+ /* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
+ mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
+ ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US);
+ if (ret)
+ dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
+
+ return ret;
+}
+
+static const struct fpga_manager_ops altera_cvp_ops = {
+ .state = altera_cvp_state,
+ .write_init = altera_cvp_write_init,
+ .write = altera_cvp_write,
+ .write_complete = altera_cvp_write_complete,
+};
+
+static ssize_t show_chkcfg(struct device_driver *dev, char *buf)
+{
+ return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
+}
+
+static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ ret = kstrtobool(buf, &altera_cvp_chkcfg);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg);
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id);
+static void altera_cvp_remove(struct pci_dev *pdev);
+
+#define PCI_VENDOR_ID_ALTERA 0x1172
+
+static struct pci_device_id altera_cvp_id_tbl[] = {
+ { PCI_VDEVICE(ALTERA, PCI_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, altera_cvp_id_tbl);
+
+static struct pci_driver altera_cvp_driver = {
+ .name = DRV_NAME,
+ .id_table = altera_cvp_id_tbl,
+ .probe = altera_cvp_probe,
+ .remove = altera_cvp_remove,
+};
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct altera_cvp_conf *conf;
+ u16 cmd, val;
+ int ret;
+
+ /*
+ * First check if this is the expected FPGA device. PCI config
+ * space access works without enabling the PCI device, memory
+ * space access is enabled further down.
+ */
+ pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val);
+ if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
+ dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+ return -ENODEV;
+ }
+
+ conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ /*
+ * Enable memory BAR access. We cannot use pci_enable_device() here
+ * because it will make the driver unusable with FPGA devices that
+ * have additional big IOMEM resources (e.g. 4GiB BARs) on 32-bit
+ * platforms. Such BARs will not have an assigned address range and
+ * pci_enable_device() will fail, complaining about an unclaimed BAR,
+ * even if the concerned BAR is not needed for FPGA configuration
+ * at all. Thus, enable the device via PCI config space command.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY)) {
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ ret = pci_request_region(pdev, CVP_BAR, "CVP");
+ if (ret) {
+ dev_err(&pdev->dev, "Requesting CVP BAR region failed\n");
+ goto err_disable;
+ }
+
+ conf->pci_dev = pdev;
+ conf->write_data = altera_cvp_write_data_iomem;
+
+ conf->map = pci_iomap(pdev, CVP_BAR, 0);
+ if (!conf->map) {
+ dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
+ conf->write_data = altera_cvp_write_data_config;
+ }
+
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
+ ALTERA_CVP_MGR_NAME, pci_name(pdev));
+
+ ret = fpga_mgr_register(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
+ if (ret)
+ goto err_unmap;
+
+ ret = driver_create_file(&altera_cvp_driver.driver,
+ &driver_attr_chkcfg);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
+ fpga_mgr_unregister(&pdev->dev);
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+err_disable:
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ return ret;
+}
+
+static void altera_cvp_remove(struct pci_dev *pdev)
+{
+ struct fpga_manager *mgr = pci_get_drvdata(pdev);
+ struct altera_cvp_conf *conf = mgr->priv;
+ u16 cmd;
+
+ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
+ fpga_mgr_unregister(&pdev->dev);
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+}
+
+module_pci_driver(altera_cvp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Module to load Altera FPGA over CvP");
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 3066b805f2d0..406d2f10741f 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -66,7 +66,7 @@ static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
/* The L3 REMAP register is write only, so keep a cached value. */
static unsigned int l3_remap_shadow;
-static spinlock_t l3_remap_lock;
+static DEFINE_SPINLOCK(l3_remap_lock);
static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
bool enable)
@@ -143,9 +143,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
int ret;
of_id = of_match_device(altera_fpga_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
priv = (struct altera_hps2fpga_data *)of_id->data;
- priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+ priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node,
+ 0);
if (IS_ERR(priv->bridge_reset)) {
dev_err(dev, "Could not get %s reset control\n", priv->name);
return PTR_ERR(priv->bridge_reset);
@@ -171,8 +177,6 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
return -EBUSY;
}
- spin_lock_init(&l3_remap_lock);
-
if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
if (enable > 1) {
dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
new file mode 100644
index 000000000000..14f14efdf0d5
--- /dev/null
+++ b/drivers/fpga/altera-ps-spi.c
@@ -0,0 +1,308 @@
+/*
+ * Altera Passive Serial SPI Driver
+ *
+ * Copyright (c) 2017 United Western Technologies, Corporation
+ *
+ * Joshua Clayton <stillcompiling@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Manage Altera FPGA firmware that is loaded over SPI using the passive
+ * serial configuration method.
+ * Firmware must be in binary "rbf" format.
+ * Works on Arria 10, Cyclone V and Stratix V. Should work on Cyclone series.
+ * May work on other Altera FPGAs.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+enum altera_ps_devtype {
+ CYCLONE5,
+ ARRIA10,
+};
+
+struct altera_ps_data {
+ enum altera_ps_devtype devtype;
+ int status_wait_min_us;
+ int status_wait_max_us;
+ int t_cfg_us;
+ int t_st2ck_us;
+};
+
+struct altera_ps_conf {
+ struct gpio_desc *config;
+ struct gpio_desc *confd;
+ struct gpio_desc *status;
+ struct spi_device *spi;
+ const struct altera_ps_data *data;
+ u32 info_flags;
+ char mgr_name[64];
+};
+
+/* | Arria 10 | Cyclone5 | Stratix5 |
+ * t_CF2ST0 | [; 600] | [; 600] | [; 600] |ns
+ * t_CFG | [2;] | [2;] | [2;] |µs
+ * t_STATUS | [268; 3000] | [268; 1506] | [268; 1506] |µs
+ * t_CF2ST1 | [; 3000] | [; 1506] | [; 1506] |µs
+ * t_CF2CK | [3010;] | [1506;] | [1506;] |µs
+ * t_ST2CK | [10;] | [2;] | [2;] |µs
+ * t_CD2UM | [175; 830] | [175; 437] | [175; 437] |µs
+ */
+static struct altera_ps_data c5_data = {
+ /* these values for Cyclone5 are compatible with Stratix5 */
+ .devtype = CYCLONE5,
+ .status_wait_min_us = 268,
+ .status_wait_max_us = 1506,
+ .t_cfg_us = 2,
+ .t_st2ck_us = 2,
+};
+
+static struct altera_ps_data a10_data = {
+ .devtype = ARRIA10,
+ .status_wait_min_us = 268, /* min(t_STATUS) */
+ .status_wait_max_us = 3000, /* max(t_CF2ST1) */
+ .t_cfg_us = 2, /* max { min(t_CFG), max(tCF2ST0) } */
+ .t_st2ck_us = 10, /* min(t_ST2CK) */
+};
+
+static const struct of_device_id of_ef_match[] = {
+ { .compatible = "altr,fpga-passive-serial", .data = &c5_data },
+ { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_ef_match);
+
+static enum fpga_mgr_states altera_ps_state(struct fpga_manager *mgr)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+
+ if (gpiod_get_value_cansleep(conf->status))
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static inline void altera_ps_delay(int delay_us)
+{
+ if (delay_us > 10)
+ usleep_range(delay_us, delay_us + 5);
+ else
+ udelay(delay_us);
+}
+
+static int altera_ps_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ int min, max, waits;
+ int i;
+
+ conf->info_flags = info->flags;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 1);
+
+ /* wait min reset pulse time */
+ altera_ps_delay(conf->data->t_cfg_us);
+
+ if (!gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Status pin failed to show a reset\n");
+ return -EIO;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 0);
+
+ min = conf->data->status_wait_min_us;
+ max = conf->data->status_wait_max_us;
+ waits = max / min;
+ if (max % min)
+ waits++;
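+ /* i.e. waits = DIV_ROUND_UP(max, min): poll in min-long slices up to max */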
+
+ /* wait for max { max(t_STATUS), max(t_CF2ST1) } */
+ for (i = 0; i < waits; i++) {
+ usleep_range(min, min + 10);
+ if (!gpiod_get_value_cansleep(conf->status)) {
+ /* wait for min(t_ST2CK)*/
+ altera_ps_delay(conf->data->t_st2ck_us);
+ return 0;
+ }
+ }
+
+ dev_err(&mgr->dev, "Status pin not ready.\n");
+ return -EIO;
+}
+
+static void rev_buf(char *buf, size_t len)
+{
+ u32 *fw32 = (u32 *)buf;
+ size_t extra_bytes = (len & 0x03);
+ const u32 *fw_end = (u32 *)(buf + len - extra_bytes);
+
+ /* set buffer to lsb first */
+ while (fw32 < fw_end) {
+ *fw32 = bitrev8x4(*fw32);
+ fw32++;
+ }
+
+ if (extra_bytes) {
+ buf = (char *)fw_end;
+ while (extra_bytes) {
+ *buf = bitrev8(*buf);
+ buf++;
+ extra_bytes--;
+ }
+ }
+}
+
+static int altera_ps_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ const char *fw_data = buf;
+ const char *fw_data_end = fw_data + count;
+
+ while (fw_data < fw_data_end) {
+ int ret;
+ size_t stride = min_t(size_t, fw_data_end - fw_data, SZ_4K);
+
+ if (!(conf->info_flags & FPGA_MGR_BITSTREAM_LSB_FIRST))
+ rev_buf((char *)fw_data, stride);
+
+ ret = spi_write(conf->spi, fw_data, stride);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error in firmware write: %d\n",
+ ret);
+ return ret;
+ }
+ fw_data += stride;
+ }
+
+ return 0;
+}
+
+static int altera_ps_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ const char dummy[] = {0};
+ int ret;
+
+ if (gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Error during configuration.\n");
+ return -EIO;
+ }
+
+ if (!IS_ERR(conf->confd)) {
+ if (!gpiod_get_raw_value_cansleep(conf->confd)) {
+ dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * After CONF_DONE goes high, send two additional falling edges on DCLK
+ * to begin initialization and enter user mode
+ */
+ ret = spi_write(conf->spi, dummy, 1);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error during end sequence: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct fpga_manager_ops altera_ps_ops = {
+ .state = altera_ps_state,
+ .write_init = altera_ps_write_init,
+ .write = altera_ps_write,
+ .write_complete = altera_ps_write_complete,
+};
+
+static int altera_ps_probe(struct spi_device *spi)
+{
+ struct altera_ps_conf *conf;
+ const struct of_device_id *of_id;
+
+ conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ of_id = of_match_device(of_ef_match, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
+
+ conf->data = of_id->data;
+ conf->spi = spi;
+ conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
+ if (IS_ERR(conf->config)) {
+ dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
+ PTR_ERR(conf->config));
+ return PTR_ERR(conf->config);
+ }
+
+ conf->status = devm_gpiod_get(&spi->dev, "nstat", GPIOD_IN);
+ if (IS_ERR(conf->status)) {
+ dev_err(&spi->dev, "Failed to get status gpio: %ld\n",
+ PTR_ERR(conf->status));
+ return PTR_ERR(conf->status);
+ }
+
+ conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+ if (IS_ERR(conf->confd)) {
+ dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
+ PTR_ERR(conf->confd));
+ }
+
+ /* Register manager with unique name */
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
+ dev_driver_string(&spi->dev), dev_name(&spi->dev));
+
+ return fpga_mgr_register(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
+}
+
+static int altera_ps_remove(struct spi_device *spi)
+{
+ fpga_mgr_unregister(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id altera_ps_spi_ids[] = {
+ {"cyclone-ps-spi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
+
+static struct spi_driver altera_ps_driver = {
+ .driver = {
+ .name = "altera-ps-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_ef_match),
+ },
+ .id_table = altera_ps_spi_ids,
+ .probe = altera_ps_probe,
+ .remove = altera_ps_remove,
+};
+
+module_spi_driver(altera_ps_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joshua Clayton <stillcompiling@gmail.com>");
+MODULE_DESCRIPTION("Module to load Altera FPGA firmware over SPI");
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 3b6b2f4182a1..d9ab7c75b14f 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -319,8 +319,8 @@ static int child_regions_with_firmware(struct device_node *overlay)
of_node_put(child_region);
if (ret)
- pr_err("firmware-name not allowed in child FPGA region: %s",
- child_region->full_name);
+ pr_err("firmware-name not allowed in child FPGA region: %pOF",
+ child_region);
return ret;
}
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 06432d84cbf8..4ea63d9bd131 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -475,7 +475,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return count;
}
-static struct bin_attribute fsi_slave_raw_attr = {
+static const struct bin_attribute fsi_slave_raw_attr = {
.attr = {
.name = "raw",
.mode = 0600,
@@ -499,7 +499,7 @@ static ssize_t fsi_slave_sysfs_term_write(struct file *file,
return count;
}
-static struct bin_attribute fsi_slave_term_attr = {
+static const struct bin_attribute fsi_slave_term_attr = {
.attr = {
.name = "term",
.mode = 0200,
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 98d062fd353e..e13353a2fd7c 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -57,12 +57,6 @@ static int put_scom(struct scom_device *scom_dev, uint64_t value,
int rc;
uint32_t data;
- data = cpu_to_be32(SCOM_RESET_CMD);
- rc = fsi_device_write(scom_dev->fsi_dev, SCOM_RESET_REG, &data,
- sizeof(uint32_t));
- if (rc)
- return rc;
-
data = cpu_to_be32((value >> 32) & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
@@ -186,6 +180,7 @@ static const struct file_operations scom_fops = {
static int scom_probe(struct device *dev)
{
+ uint32_t data;
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct scom_device *scom;
@@ -202,6 +197,9 @@ static int scom_probe(struct device *dev)
scom->mdev.parent = dev;
list_add(&scom->link, &scom_devices);
+ data = cpu_to_be32(SCOM_RESET_CMD);
+ fsi_device_write(fsi_dev, SCOM_RESET_REG, &data, sizeof(uint32_t));
+
return misc_register(&scom->mdev);
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e57cc40cb768..63ac1c6a825f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -177,6 +177,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_gpadl;
+ }
+
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
@@ -421,6 +426,11 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (channel->rescind) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
if (ret != 0)
@@ -494,6 +504,10 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
list_add_tail(&info->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (channel->rescind)
+ goto post_msg_err;
+
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
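The three channel.c hunks above share one pattern: after queuing the message
under channelmsg_lock, re-check channel->rescind before posting to the host,
since a rescind can arrive in between. Condensed sketch (the unwind label is
per call site):

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (channel->rescind) {		/* offer revoked while queuing */
		ret = -ENODEV;
		goto cleanup;		/* site-specific unwind */
	}

	ret = vmbus_post_msg(msg, msgsize, true);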
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 4bbb8dea4727..968af173c4c1 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -451,6 +451,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
/* Make sure this is a new offer */
mutex_lock(&vmbus_connection.channel_mutex);
+ /*
+ * Now that we have acquired the channel_mutex,
+ * we can release the potentially racing rescind thread.
+ */
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (!uuid_le_cmp(channel->offermsg.offer.if_type,
newchannel->offermsg.offer.if_type) &&
@@ -481,7 +487,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
channel->num_sc++;
spin_unlock_irqrestore(&channel->lock, flags);
} else {
- atomic_dec(&vmbus_connection.offer_in_progress);
goto err_free_chan;
}
}
@@ -510,7 +515,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!fnew) {
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
- atomic_dec(&vmbus_connection.offer_in_progress);
return;
}
@@ -541,7 +545,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
goto err_deq_chan;
}
- atomic_dec(&vmbus_connection.offer_in_progress);
+ newchannel->probe_done = true;
return;
err_deq_chan:
@@ -882,8 +886,27 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
channel->rescind = true;
spin_unlock_irqrestore(&channel->lock, flags);
+ /*
+ * Now that we have posted the rescind state, perform
+ * rescind related cleanup.
+ */
vmbus_rescind_cleanup(channel);
+ /*
+ * Now wait for offer handling to complete.
+ */
+ while (READ_ONCE(channel->probe_done) == false) {
+ /*
+ * We wait here while the channel offer is still
+ * being processed.
+ */
+ msleep(1);
+ }
+
+ /*
+ * At this point, the rescind handling can proceed safely.
+ */
+
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index f5728deff893..db0e6652d7ef 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -584,10 +584,6 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case MEM_ONLINE:
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- dm_device.num_pages_onlined += mem->nr_pages;
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
- /* Fall through */
case MEM_CANCEL_ONLINE:
if (dm_device.ha_waiting) {
dm_device.ha_waiting = false;
@@ -644,6 +640,9 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__online_page_set_limits(pg);
__online_page_increment_counters(pg);
__online_page_free(pg);
+
+ WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+ dm_device.num_pages_onlined++;
}
static void hv_bring_pgs_online(struct hv_hotadd_state *has,
@@ -1036,8 +1035,8 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
if (info_hdr->data_size == sizeof(__u64)) {
__u64 *max_page_count = (__u64 *)&info_hdr[1];
- pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
- *max_page_count);
+ pr_info("Max. dynamic memory size: %llu MB\n",
+ (*max_page_count) >> (20 - PAGE_SHIFT));
}
break;
@@ -1656,6 +1655,7 @@ static int balloon_probe(struct hv_device *dev,
}
dm_device.state = DM_INITIALIZED;
+ last_post_time = jiffies;
return 0;
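The new balloon message converts a page count to megabytes: a page holds
2^PAGE_SHIFT bytes and a megabyte 2^20, so the driver shifts right by
(20 - PAGE_SHIFT). A worked example assuming 4 KiB pages:

	/* PAGE_SHIFT == 12 -> 2^(20 - 12) == 256 pages per MB,
	 * so *max_page_count == 1048576 gives 1048576 >> 8 == 4096 MB
	 */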
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 9a90b915b5be..5eed1e7da15c 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -304,7 +304,7 @@ static int process_ob_ipinfo(void *in_msg, void *out_msg, int op)
strlen((char *)in->body.kvp_ip_val.adapter_id),
UTF16_HOST_ENDIAN,
(wchar_t *)out->kvp_ip_val.adapter_id,
- MAX_IP_ADDR_SIZE);
+ MAX_ADAPTER_ID_SIZE);
if (len < 0)
return len;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 1f450c39a9b0..12eb8caa4263 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,7 @@
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>
#include "hyperv_vmbus.h"
@@ -94,30 +95,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->write_index = next_write_location;
}
-/* Get the next read location for the specified ring buffer. */
-static inline u32
-hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
-{
- return ring_info->ring_buffer->read_index;
-}
-
-/*
- * Get the next read location + offset for the specified ring buffer.
- * This allows the caller to skip.
- */
-static inline u32
-hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
- u32 offset)
-{
- u32 next = ring_info->ring_buffer->read_index;
-
- next += offset;
- if (next >= ring_info->ring_datasize)
- next -= ring_info->ring_datasize;
-
- return next;
-}
-
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
@@ -142,29 +119,6 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
}
/*
- * Helper routine to copy to source from ring buffer.
- * Assume there is enough room. Handles wrap-around in src case only!!
- */
-static u32 hv_copyfrom_ringbuffer(
- const struct hv_ring_buffer_info *ring_info,
- void *dest,
- u32 destlen,
- u32 start_read_offset)
-{
- void *ring_buffer = hv_get_ring_buffer(ring_info);
- u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
-
- memcpy(dest, ring_buffer + start_read_offset, destlen);
-
- start_read_offset += destlen;
- if (start_read_offset >= ring_buffer_size)
- start_read_offset -= ring_buffer_size;
-
- return start_read_offset;
-}
-
-
-/*
* Helper routine to copy from source to ring buffer.
* Assume there is enough room. Handles wrap-around in dest case only!!
*/
@@ -334,33 +288,22 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
return 0;
}
-static inline void
-init_cached_read_index(struct hv_ring_buffer_info *rbi)
-{
- rbi->cached_read_index = rbi->ring_buffer->read_index;
-}
-
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw)
{
- u32 bytes_avail_toread;
- u32 next_read_location;
- u64 prev_indices = 0;
- struct vmpacket_descriptor desc;
- u32 offset;
- u32 packetlen;
- struct hv_ring_buffer_info *inring_info = &channel->inbound;
-
- if (buflen <= 0)
+ struct vmpacket_descriptor *desc;
+ u32 packetlen, offset;
+
+ if (unlikely(buflen == 0))
return -EINVAL;
*buffer_actual_len = 0;
*requestid = 0;
- bytes_avail_toread = hv_get_bytes_to_read(inring_info);
/* Make sure there is something to read */
- if (bytes_avail_toread < sizeof(desc)) {
+ desc = hv_pkt_iter_first(channel);
+ if (desc == NULL) {
/*
* No error is returned when there is not even a header;
* drivers are expected to check buffer_actual_len.
@@ -368,48 +311,22 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return 0;
}
- init_cached_read_index(inring_info);
-
- next_read_location = hv_get_next_read_location(inring_info);
- next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
- sizeof(desc),
- next_read_location);
-
- offset = raw ? 0 : (desc.offset8 << 3);
- packetlen = (desc.len8 << 3) - offset;
+ offset = raw ? 0 : (desc->offset8 << 3);
+ packetlen = (desc->len8 << 3) - offset;
*buffer_actual_len = packetlen;
- *requestid = desc.trans_id;
-
- if (bytes_avail_toread < packetlen + offset)
- return -EAGAIN;
+ *requestid = desc->trans_id;
- if (packetlen > buflen)
+ if (unlikely(packetlen > buflen))
return -ENOBUFS;
- next_read_location =
- hv_get_next_readlocation_withoffset(inring_info, offset);
+ /* Since the ring is double-mapped, only one copy is necessary. */
+ memcpy(buffer, (const char *)desc + offset, packetlen);
- next_read_location = hv_copyfrom_ringbuffer(inring_info,
- buffer,
- packetlen,
- next_read_location);
+ /* Advance ring index to next packet descriptor */
+ __hv_pkt_iter_next(channel, desc);
- next_read_location = hv_copyfrom_ringbuffer(inring_info,
- &prev_indices,
- sizeof(u64),
- next_read_location);
-
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- virt_mb();
-
- /* Update the read index */
- hv_set_next_read_location(inring_info, next_read_location);
-
- hv_signal_on_read(channel);
+ /* Notify host of update */
+ hv_pkt_iter_close(channel);
return 0;
}
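
With this rewrite, hv_ringbuffer_read() is just one client of the packet iterator. A hedged sketch of how a VMBus driver could drain its inbound ring directly with the iterator API used here; consume() is a hypothetical driver hook, everything else is taken from this patch:

static void example_drain(struct vmbus_channel *chan)
{
	struct vmpacket_descriptor *desc;

	for (desc = hv_pkt_iter_first(chan); desc;
	     desc = __hv_pkt_iter_next(chan, desc)) {
		/* payload starts offset8 quadwords into the descriptor */
		u32 offset = desc->offset8 << 3;
		u32 len = (desc->len8 << 3) - offset;

		consume((const char *)desc + offset, len);
	}

	/* commit the read index and signal the host if it was blocked */
	hv_pkt_iter_close(chan);
}
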
@@ -440,14 +357,16 @@ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /* set state for later hv_signal_on_read() */
- init_cached_read_index(rbi);
+ struct vmpacket_descriptor *desc;
if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
return NULL;
- return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+ desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+ if (desc)
+ prefetch((char *)desc + (desc->len8 << 3));
+
+ return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
@@ -471,10 +390,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
rbi->priv_read_index -= dsize;
/* more data? */
- if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
- return NULL;
- else
- return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+ return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
@@ -484,6 +400,7 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
+ u32 orig_write_sz = hv_get_bytes_to_write(rbi);
/*
* Make sure all reads are done before we update the read index since
@@ -493,6 +410,40 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
virt_rmb();
rbi->ring_buffer->read_index = rbi->priv_read_index;
- hv_signal_on_read(channel);
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * If the read of pending_send_sz (below) were reordered before
+ * the commit of the new read index (above), the host could set
+ * pending_send_sz and go to sleep after we sampled it but before
+ * we committed the read index, and we would miss sending the
+ * interrupt.
+ */
+ virt_mb();
+
+ /* If host has disabled notifications then skip */
+ if (rbi->ring_buffer->interrupt_mask)
+ return;
+
+ if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
+ u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+
+ /*
+ * If there was space before we began iteration, the host was
+ * not blocked. This also covers the case where pending_sz is
+ * zero: the host has nothing pending and does not need to be
+ * signaled.
+ */
+ if (orig_write_sz > pending_sz)
+ return;
+
+ /* If pending write will not fit, don't give false hope. */
+ if (hv_get_bytes_to_write(rbi) < pending_sz)
+ return;
+ }
+
+ vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
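
The signaling decision at the end of hv_pkt_iter_close() can be checked in isolation. A standalone model of the early-exit conditions, using the same quantities as the patch (interrupt_mask, feat_pending_send_sz, pending_send_sz, and the write space before and after iteration):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool should_signal(uint32_t interrupt_mask, bool feat_pending_send_sz,
			  uint32_t pending_sz, uint32_t orig_write_sz,
			  uint32_t cur_write_sz)
{
	if (interrupt_mask)			/* host disabled notifications */
		return false;

	if (feat_pending_send_sz) {
		if (orig_write_sz > pending_sz)	/* host was never blocked */
			return false;
		if (cur_write_sz < pending_sz)	/* pending write still won't fit */
			return false;
	}

	return true;				/* vmbus_setevent(channel) */
}

int main(void)
{
	/* host blocked waiting for 512 bytes; we freed 1024 -> signal */
	assert(should_signal(0, true, 512, 256, 1024));
	/* there was already room before we read -> no signal */
	assert(!should_signal(0, true, 512, 600, 1024));
	/* notifications masked -> never signal */
	assert(!should_signal(1, true, 512, 256, 1024));
	return 0;
}
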
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index ed84e96715a0..43160a2eafe0 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -940,6 +940,9 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->offermsg.child_relid != relid)
continue;
+ if (channel->rescind)
+ continue;
+
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 8d55d6d79015..ef9cb3c164e1 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -70,13 +70,13 @@ config CORESIGHT_SOURCE_ETM4X
for instruction level tracing. Depending on the implemented version
data tracing may also be available.
-config CORESIGHT_QCOM_REPLICATOR
- bool "Qualcomm CoreSight Replicator driver"
+config CORESIGHT_DYNAMIC_REPLICATOR
+ bool "CoreSight Programmable Replicator driver"
depends on CORESIGHT_LINKS_AND_SINKS
help
- This enables support for Qualcomm CoreSight link driver. The
- programmable ATB replicator sends the ATB trace stream from the
- ETB/ETF to the TPIUi and ETR.
+ This enables support for the dynamic CoreSight replicator link driver.
+ The programmable ATB replicator allows independent filtering of the
+ trace data based on the traceid.
config CORESIGHT_STM
bool "CoreSight System Trace Macrocell driver"
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 433d59025eb6..5bae90ce794d 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -14,6 +14,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
coresight-etm3x-sysfs.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
coresight-etm4x-sysfs.o
-obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 64a77e00eaa6..6ea62c62ff27 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -667,7 +667,7 @@ static int debug_remove(struct amba_device *adev)
return 0;
}
-static struct amba_id debug_ids[] = {
+static const struct amba_id debug_ids[] = {
{ /* Debug for Cortex-A53 */
.id = 0x000bbd03,
.mask = 0x000fffff,
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index 0a3d15f0b009..accc2056f7c6 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -95,6 +95,28 @@ static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
+#define coresight_replicator_reg(name, offset) \
+ coresight_simple_reg32(struct replicator_state, name, offset)
+
+coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
+coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
+
+static struct attribute *replicator_mgmt_attrs[] = {
+ &dev_attr_idfilter0.attr,
+ &dev_attr_idfilter1.attr,
+ NULL,
+};
+
+static const struct attribute_group replicator_mgmt_group = {
+ .attrs = replicator_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *replicator_groups[] = {
+ &replicator_mgmt_group,
+ NULL,
+};
+
static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -139,11 +161,11 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
desc.ops = &replicator_cs_ops;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
+ desc.groups = replicator_groups;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
return 0;
}
@@ -175,18 +197,22 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
NULL)
};
-static struct amba_id replicator_ids[] = {
+static const struct amba_id replicator_ids[] = {
{
.id = 0x0003b909,
.mask = 0x0003ffff,
- .data = "REPLICATOR 1.0",
+ },
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9ec,
+ .mask = 0x000fffff,
},
{ 0, 0 },
};
static struct amba_driver replicator_driver = {
.drv = {
- .name = "coresight-replicator-qcom",
+ .name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index d5b96423e1a5..56ecd7aff5eb 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -200,8 +200,10 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
+ bool lost = false;
int i;
u8 *buf_ptr;
+ const u32 *barrier;
u32 read_data, depth;
u32 read_ptr, write_ptr;
u32 frame_off, frame_endoff;
@@ -223,20 +225,26 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
}
if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
- & ETB_STATUS_RAM_FULL) == 0)
+ & ETB_STATUS_RAM_FULL) == 0) {
writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
- else
+ } else {
writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+ lost = true;
+ }
depth = drvdata->buffer_depth;
buf_ptr = drvdata->buf;
+ barrier = barrier_pkt;
for (i = 0; i < depth; i++) {
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- *buf_ptr++ = read_data >> 0;
- *buf_ptr++ = read_data >> 8;
- *buf_ptr++ = read_data >> 16;
- *buf_ptr++ = read_data >> 24;
+ if (lost && *barrier) {
+ read_data = *barrier;
+ barrier++;
+ }
+
+ *(u32 *)buf_ptr = read_data;
+ buf_ptr += 4;
}
if (frame_off) {
@@ -353,8 +361,10 @@ static void etb_update_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
+ bool lost = false;
int i, cur;
u8 *buf_ptr;
+ const u32 *barrier;
u32 read_ptr, write_ptr, capacity;
u32 status, read_data, to_read;
unsigned long offset;
@@ -366,8 +376,8 @@ static void etb_update_buffer(struct coresight_device *csdev,
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
- CS_UNLOCK(drvdata->base);
etb_disable_hw(drvdata);
+ CS_UNLOCK(drvdata->base);
/* unit is in words, not bytes */
read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
@@ -384,7 +394,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
(unsigned long)write_ptr);
write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
}
/*
@@ -395,7 +405,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
*/
status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
if (status & ETB_STATUS_RAM_FULL) {
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
to_read = capacity;
read_ptr = write_ptr;
} else {
@@ -428,22 +438,30 @@ static void etb_update_buffer(struct coresight_device *csdev,
if (read_ptr > (drvdata->buffer_depth - 1))
read_ptr -= drvdata->buffer_depth;
/* let the decoder know we've skipped ahead */
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
}
+ if (lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+
/* finally tell HW where we want to start reading from */
writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
cur = buf->cur;
offset = buf->offset;
+ barrier = barrier_pkt;
+
for (i = 0; i < to_read; i += 4) {
buf_ptr = buf->data_pages[cur] + offset;
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- *buf_ptr++ = read_data >> 0;
- *buf_ptr++ = read_data >> 8;
- *buf_ptr++ = read_data >> 16;
- *buf_ptr++ = read_data >> 24;
+ if (lost && *barrier) {
+ read_data = *barrier;
+ barrier++;
+ }
+
+ *(u32 *)buf_ptr = read_data;
+ buf_ptr += 4;
offset += 4;
if (offset >= PAGE_SIZE) {
@@ -557,17 +575,17 @@ static const struct file_operations etb_fops = {
.llseek = no_llseek,
};
-#define coresight_etb10_simple_func(name, offset) \
- coresight_simple_func(struct etb_drvdata, NULL, name, offset)
+#define coresight_etb10_reg(name, offset) \
+ coresight_simple_reg32(struct etb_drvdata, name, offset)
-coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
-coresight_etb10_simple_func(sts, ETB_STATUS_REG);
-coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
-coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
-coresight_etb10_simple_func(trg, ETB_TRG);
-coresight_etb10_simple_func(ctl, ETB_CTL_REG);
-coresight_etb10_simple_func(ffsr, ETB_FFSR);
-coresight_etb10_simple_func(ffcr, ETB_FFCR);
+coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
+coresight_etb10_reg(sts, ETB_STATUS_REG);
+coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
+coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
+coresight_etb10_reg(trg, ETB_TRG);
+coresight_etb10_reg(ctl, ETB_CTL_REG);
+coresight_etb10_reg(ffsr, ETB_FFSR);
+coresight_etb10_reg(ffcr, ETB_FFCR);
static struct attribute *coresight_etb_mgmt_attrs[] = {
&dev_attr_rdp.attr,
@@ -728,7 +746,7 @@ static const struct dev_pm_ops etb_dev_pm_ops = {
SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};
-static struct amba_id etb_ids[] = {
+static const struct amba_id etb_ids[] = {
{
.id = 0x0003b907,
.mask = 0x0003ffff,
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 8f546f59a3fd..8a0ad77574e7 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -53,14 +53,16 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
+PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_timestamp.attr,
+ &format_attr_retstack.attr,
NULL,
};
-static struct attribute_group etm_pmu_format_group = {
+static const struct attribute_group etm_pmu_format_group = {
.name = "format",
.attrs = etm_config_formats_attr,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index ad063d7444e1..70b0a248c321 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -106,6 +106,7 @@
#define ETMTECR1_START_STOP BIT(25)
/* ETMCCER - 0x1E8 */
#define ETMCCER_TIMESTAMP BIT(22)
+#define ETMCCER_RETSTACK BIT(23)
#define ETM_MODE_EXCLUDE BIT(0)
#define ETM_MODE_CYCACC BIT(1)
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index ca98ad13bb8c..6e547ec6fead 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1232,19 +1232,19 @@ static struct attribute *coresight_etm_attrs[] = {
NULL,
};
-#define coresight_etm3x_simple_func(name, offset) \
- coresight_simple_func(struct etm_drvdata, NULL, name, offset)
-
-coresight_etm3x_simple_func(etmccr, ETMCCR);
-coresight_etm3x_simple_func(etmccer, ETMCCER);
-coresight_etm3x_simple_func(etmscr, ETMSCR);
-coresight_etm3x_simple_func(etmidr, ETMIDR);
-coresight_etm3x_simple_func(etmcr, ETMCR);
-coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
-coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
-coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
-coresight_etm3x_simple_func(etmtecr2, ETMTECR2);
+#define coresight_etm3x_reg(name, offset) \
+ coresight_simple_reg32(struct etm_drvdata, name, offset)
+
+coresight_etm3x_reg(etmccr, ETMCCR);
+coresight_etm3x_reg(etmccer, ETMCCER);
+coresight_etm3x_reg(etmscr, ETMSCR);
+coresight_etm3x_reg(etmidr, ETMIDR);
+coresight_etm3x_reg(etmcr, ETMCR);
+coresight_etm3x_reg(etmtraceidr, ETMTRACEIDR);
+coresight_etm3x_reg(etmteevr, ETMTEEVR);
+coresight_etm3x_reg(etmtssvr, ETMTSSCR);
+coresight_etm3x_reg(etmtecr1, ETMTECR1);
+coresight_etm3x_reg(etmtecr2, ETMTECR2);
static struct attribute *coresight_etm_mgmt_attrs[] = {
&dev_attr_etmccr.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 93ee8fc539be..e5b1ec57dbde 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -243,6 +243,8 @@ void etm_set_default(struct etm_config *config)
}
config->ctxid_mask = 0x0;
+ /* Setting default to 1024 as per TRM recommendation */
+ config->sync_freq = 0x400;
}
void etm_config_trace_mode(struct etm_config *config)
@@ -308,7 +310,9 @@ void etm_config_trace_mode(struct etm_config *config)
config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
}
-#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
+ ETMCR_TIMESTAMP_EN | \
+ ETMCR_RETURN_STACK)
static int etm_parse_event_config(struct etm_drvdata *drvdata,
struct perf_event *event)
@@ -339,14 +343,24 @@ static int etm_parse_event_config(struct etm_drvdata *drvdata,
etm_config_trace_mode(config);
/*
- * At this time only cycle accurate and timestamp options are
- * available.
+ * At this time only cycle accurate, return stack and timestamp
+ * options are available.
*/
if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
return -EINVAL;
config->ctrl = attr->config;
+ /*
+ * Possible to have cores with PTM (supports ret stack) and ETM
+ * (never has ret stack) on the same SoC. So if we have a request
+ * for return stack that can't be honoured on this core then
+ * clear the bit - trace will still continue normally
+ */
+ if ((config->ctrl & ETMCR_RETURN_STACK) &&
+ !(drvdata->etmccer & ETMCCER_RETSTACK))
+ config->ctrl &= ~ETMCR_RETURN_STACK;
+
return 0;
}
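
The graceful fallback above (clear the return-stack bit rather than fail the whole session) is simple enough to test standalone. A model using the bit definitions from this patch; ETMCCER_RETSTACK is bit 23 per coresight-etm.h, while the position used for ETMCR_RETURN_STACK below is illustrative only:

#include <assert.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define ETMCCER_RETSTACK	BIT(23)	/* from coresight-etm.h in this patch */
#define ETMCR_RETURN_STACK	BIT(29)	/* assumed position, for illustration */

/* mirrors the fixup in etm_parse_event_config() */
static uint32_t fixup_ctrl(uint32_t ctrl, uint32_t etmccer)
{
	if ((ctrl & ETMCR_RETURN_STACK) && !(etmccer & ETMCCER_RETSTACK))
		ctrl &= ~ETMCR_RETURN_STACK;	/* trace continues without it */
	return ctrl;
}

int main(void)
{
	/* PTM with return stack support: request honoured */
	assert(fixup_ctrl(ETMCR_RETURN_STACK, ETMCCER_RETSTACK) &
	       ETMCR_RETURN_STACK);
	/* ETM without return stack: bit silently cleared */
	assert(!(fixup_ctrl(ETMCR_RETURN_STACK, 0) & ETMCR_RETURN_STACK));
	return 0;
}
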
@@ -885,7 +899,7 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};
-static struct amba_id etm_ids[] = {
+static const struct amba_id etm_ids[] = {
{ /* ETM 3.3 */
.id = 0x0003b921,
.mask = 0x0003ffff,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index b9b1e9c8f4c4..4e6eab53e34e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2066,23 +2066,23 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
return reg.data;
}
-#define coresight_etm4x_simple_func(name, offset) \
- coresight_simple_func(struct etmv4_drvdata, NULL, name, offset)
+#define coresight_etm4x_reg(name, offset) \
+ coresight_simple_reg32(struct etmv4_drvdata, name, offset)
#define coresight_etm4x_cross_read(name, offset) \
coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
name, offset)
-coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
-coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
-coresight_etm4x_simple_func(trclsr, TRCLSR);
-coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
-coresight_etm4x_simple_func(trcdevid, TRCDEVID);
-coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
-coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
-coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
-coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
-coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
+coresight_etm4x_reg(trcpdcr, TRCPDCR);
+coresight_etm4x_reg(trcpdsr, TRCPDSR);
+coresight_etm4x_reg(trclsr, TRCLSR);
+coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
+coresight_etm4x_reg(trcdevid, TRCDEVID);
+coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
+coresight_etm4x_reg(trcpidr0, TRCPIDR0);
+coresight_etm4x_reg(trcpidr1, TRCPIDR1);
+coresight_etm4x_reg(trcpidr2, TRCPIDR2);
+coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 532adc9dd32a..cf364a514c12 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -224,6 +224,10 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
if (attr->config & BIT(ETM_OPT_TS))
/* bit[11], Global timestamp tracing bit */
config->cfg |= BIT(11);
+ /* return stack - enable if selected and supported */
+ if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
+ /* bit[12], Return stack enable bit */
+ config->cfg |= BIT(12);
out:
return ret;
@@ -1048,7 +1052,7 @@ err_arch_supported:
return ret;
}
-static struct amba_id etm4_ids[] = {
+static const struct amba_id etm4_ids[] = {
{ /* ETM 4.0 - Cortex-A53 */
.id = 0x000bb95d,
.mask = 0x000fffff,
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 860fe6ef5632..77642e0e955b 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -246,11 +246,16 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
};
-static struct amba_id funnel_ids[] = {
+static const struct amba_id funnel_ids[] = {
{
.id = 0x0003b908,
.mask = 0x0003ffff,
},
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9eb,
+ .mask = 0x000fffff,
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 5f662d82052c..f1d0e21d8cab 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -39,23 +39,33 @@
#define ETM_MODE_EXCL_USER BIT(31)
typedef u32 (*coresight_read_fn)(const struct device *, u32 offset);
-#define coresight_simple_func(type, func, name, offset) \
+#define __coresight_simple_func(type, func, name, lo_off, hi_off) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
{ \
type *drvdata = dev_get_drvdata(_dev->parent); \
coresight_read_fn fn = func; \
- u32 val; \
+ u64 val; \
pm_runtime_get_sync(_dev->parent); \
if (fn) \
- val = fn(_dev->parent, offset); \
+ val = (u64)fn(_dev->parent, lo_off); \
else \
- val = readl_relaxed(drvdata->base + offset); \
+ val = coresight_read_reg_pair(drvdata->base, \
+ lo_off, hi_off); \
pm_runtime_put_sync(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", val); \
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", val); \
} \
static DEVICE_ATTR_RO(name)
+#define coresight_simple_func(type, func, name, offset) \
+ __coresight_simple_func(type, func, name, offset, -1)
+#define coresight_simple_reg32(type, name, offset) \
+ __coresight_simple_func(type, NULL, name, offset, -1)
+#define coresight_simple_reg64(type, name, lo_off, hi_off) \
+ __coresight_simple_func(type, NULL, name, lo_off, hi_off)
+
+extern const u32 barrier_pkt[5];
+
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
@@ -106,6 +116,25 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
+static inline u64
+coresight_read_reg_pair(void __iomem *addr, s32 lo_offset, s32 hi_offset)
+{
+ u64 val;
+
+ val = readl_relaxed(addr + lo_offset);
+ val |= (hi_offset < 0) ? 0 :
+ (u64)readl_relaxed(addr + hi_offset) << 32;
+ return val;
+}
+
+static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
+ s32 lo_offset, s32 hi_offset)
+{
+ writel_relaxed((u32)val, addr + lo_offset);
+ if (hi_offset >= 0)
+ writel_relaxed((u32)(val >> 32), addr + hi_offset);
+}
+
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode);
struct coresight_device *coresight_get_sink(struct list_head *path);
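
coresight_read_reg_pair() composes a 64-bit value from two 32-bit registers, with hi_offset < 0 meaning "no high half". A standalone model of that composition over a fake register file instead of an __iomem region:

#include <assert.h>
#include <stdint.h>

/* fake MMIO: offsets index into a u32 array instead of an __iomem base */
static uint32_t regs[8] = { [2] = 0xdeadbeef, [3] = 0x00000001 };

static uint64_t read_reg_pair(int32_t lo_off, int32_t hi_off)
{
	uint64_t val = regs[lo_off];

	if (hi_off >= 0)
		val |= (uint64_t)regs[hi_off] << 32;
	return val;
}

int main(void)
{
	/* 64-bit pair: hi:lo */
	assert(read_reg_pair(2, 3) == 0x00000001deadbeefULL);
	/* 32-bit only, as used by coresight_simple_reg32() */
	assert(read_reg_pair(2, -1) == 0xdeadbeefULL);
	return 0;
}
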
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 93fc26f01bab..92a780a6df1d 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -276,7 +276,7 @@ static void stm_disable(struct coresight_device *csdev,
spin_unlock(&drvdata->spinlock);
/* Wait until the engine has completely stopped */
- coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0);
+ coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
pm_runtime_put(drvdata->dev);
@@ -307,7 +307,8 @@ static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
return ((unsigned long)addr & (write_bytes - 1));
}
-static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
+static void stm_send(void __iomem *addr, const void *data,
+ u32 size, u8 write_bytes)
{
u8 paload[8];
@@ -414,7 +415,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
unsigned int size,
const unsigned char *payload)
{
- unsigned long ch_addr;
+ void __iomem *ch_addr;
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
@@ -424,7 +425,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
if (channel >= drvdata->numsp)
return -EINVAL;
- ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
+ ch_addr = stm_channel_addr(drvdata, channel);
flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
flags |= test_bit(channel, drvdata->chs.guaranteed) ?
@@ -437,20 +438,20 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
switch (packet) {
case STP_PACKET_FLAG:
- ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags);
+ ch_addr += stm_channel_off(STM_PKT_TYPE_FLAG, flags);
/*
* The generic STM core sets a size of '0' on flag packets.
* As such send a flag packet of size '1' and tell the
* core we did so.
*/
- stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes);
+ stm_send(ch_addr, payload, 1, drvdata->write_bytes);
size = 1;
break;
case STP_PACKET_DATA:
- ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags);
- stm_send((void *)ch_addr, payload, size,
+ ch_addr += stm_channel_off(STM_PKT_TYPE_DATA, flags);
+ stm_send(ch_addr, payload, size,
drvdata->write_bytes);
break;
@@ -635,21 +636,21 @@ static ssize_t traceid_store(struct device *dev,
}
static DEVICE_ATTR_RW(traceid);
-#define coresight_stm_simple_func(name, offset) \
- coresight_simple_func(struct stm_drvdata, NULL, name, offset)
-
-coresight_stm_simple_func(tcsr, STMTCSR);
-coresight_stm_simple_func(tsfreqr, STMTSFREQR);
-coresight_stm_simple_func(syncr, STMSYNCR);
-coresight_stm_simple_func(sper, STMSPER);
-coresight_stm_simple_func(spter, STMSPTER);
-coresight_stm_simple_func(privmaskr, STMPRIVMASKR);
-coresight_stm_simple_func(spscr, STMSPSCR);
-coresight_stm_simple_func(spmscr, STMSPMSCR);
-coresight_stm_simple_func(spfeat1r, STMSPFEAT1R);
-coresight_stm_simple_func(spfeat2r, STMSPFEAT2R);
-coresight_stm_simple_func(spfeat3r, STMSPFEAT3R);
-coresight_stm_simple_func(devid, CORESIGHT_DEVID);
+#define coresight_stm_reg(name, offset) \
+ coresight_simple_reg32(struct stm_drvdata, name, offset)
+
+coresight_stm_reg(tcsr, STMTCSR);
+coresight_stm_reg(tsfreqr, STMTSFREQR);
+coresight_stm_reg(syncr, STMSYNCR);
+coresight_stm_reg(sper, STMSPER);
+coresight_stm_reg(spter, STMSPTER);
+coresight_stm_reg(privmaskr, STMPRIVMASKR);
+coresight_stm_reg(spscr, STMSPSCR);
+coresight_stm_reg(spmscr, STMSPMSCR);
+coresight_stm_reg(spfeat1r, STMSPFEAT1R);
+coresight_stm_reg(spfeat2r, STMSPFEAT2R);
+coresight_stm_reg(spfeat3r, STMSPFEAT3R);
+coresight_stm_reg(devid, CORESIGHT_DEVID);
static struct attribute *coresight_stm_attrs[] = {
&dev_attr_hwevent_enable.attr,
@@ -914,7 +915,7 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
};
-static struct amba_id stm_ids[] = {
+static const struct amba_id stm_ids[] = {
{
.id = 0x0003b962,
.mask = 0x0003ffff,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index e3b9fb82eb8d..e2513b786242 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -43,17 +43,34 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
+ bool lost = false;
char *bufp;
- u32 read_data;
+ const u32 *barrier;
+ u32 read_data, status;
int i;
+ /*
+ * Get a hold of the status register and see if a wrap around
+ * has occurred.
+ */
+ status = readl_relaxed(drvdata->base + TMC_STS);
+ if (status & TMC_STS_FULL)
+ lost = true;
+
bufp = drvdata->buf;
drvdata->len = 0;
+ barrier = barrier_pkt;
while (1) {
for (i = 0; i < drvdata->memwidth; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
return;
+
+ if (lost && *barrier) {
+ read_data = *barrier;
+ barrier++;
+ }
+
memcpy(bufp, &read_data, 4);
bufp += 4;
drvdata->len += 4;
@@ -369,9 +386,11 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
+ bool lost = false;
int i, cur;
+ const u32 *barrier;
u32 *buf_ptr;
- u32 read_ptr, write_ptr;
+ u64 read_ptr, write_ptr;
u32 status, to_read;
unsigned long offset;
struct cs_buffers *buf = sink_config;
@@ -388,8 +407,8 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
tmc_flush_and_stop(drvdata);
- read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
- write_ptr = readl_relaxed(drvdata->base + TMC_RWP);
+ read_ptr = tmc_read_rrp(drvdata);
+ write_ptr = tmc_read_rwp(drvdata);
/*
* Get a hold of the status register and see if a wrap around
@@ -397,7 +416,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
*/
status = readl_relaxed(drvdata->base + TMC_STS);
if (status & TMC_STS_FULL) {
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
to_read = drvdata->size;
} else {
to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
@@ -441,18 +460,27 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
if (read_ptr > (drvdata->size - 1))
read_ptr -= drvdata->size;
/* Tell the HW */
- writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ tmc_write_rrp(drvdata, read_ptr);
+ lost = true;
}
+ if (lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+
cur = buf->cur;
offset = buf->offset;
+ barrier = barrier_pkt;
/* copy the trace data, one 32-bit word at a time */
for (i = 0; i < to_read; i += 4) {
buf_ptr = buf->data_pages[cur] + offset;
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
+ if (lost && *barrier) {
+ *buf_ptr = *barrier;
+ barrier++;
+ }
+
offset += 4;
if (offset >= PAGE_SIZE) {
offset = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 5d312699b3b9..68fbc8f7450e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -22,7 +22,7 @@
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
- u32 axictl;
+ u32 axictl, sts;
/* Zero out the memory to help with debug */
memset(drvdata->vaddr, 0, drvdata->size);
@@ -36,17 +36,29 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
- axictl |= TMC_AXICTL_WR_BURST_16;
- writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
- writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- axictl = (axictl &
- ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
- TMC_AXICTL_PROT_CTL_B1;
+ axictl &= ~TMC_AXICTL_CLEAR_MASK;
+ axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
+ axictl |= TMC_AXICTL_AXCACHE_OS;
+
+ if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
+ axictl &= ~TMC_AXICTL_ARCACHE_MASK;
+ axictl |= TMC_AXICTL_ARCACHE_OS;
+ }
+
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ tmc_write_dba(drvdata, drvdata->paddr);
+ /*
+ * If the TMC pointers must be programmed before the session,
+ * we have to set them properly (i.e., RRP/RWP to the base address
+ * and STS to "not full").
+ */
+ if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
+ tmc_write_rrp(drvdata, drvdata->paddr);
+ tmc_write_rwp(drvdata, drvdata->paddr);
+ sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
+ writel_relaxed(sts, drvdata->base + TMC_STS);
+ }
- writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
- writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
@@ -59,9 +71,12 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
- u32 rwp, val;
+ const u32 *barrier;
+ u32 val;
+ u32 *temp;
+ u64 rwp;
- rwp = readl_relaxed(drvdata->base + TMC_RWP);
+ rwp = tmc_read_rwp(drvdata);
val = readl_relaxed(drvdata->base + TMC_STS);
/*
@@ -71,6 +86,16 @@ static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
if (val & TMC_STS_FULL) {
drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
drvdata->len = drvdata->size;
+
+ barrier = barrier_pkt;
+ temp = (u32 *)drvdata->buf;
+
+ while (*barrier) {
+ *temp = *barrier;
+ temp++;
+ barrier++;
+ }
+
} else {
drvdata->buf = drvdata->vaddr;
drvdata->len = rwp - drvdata->paddr;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 864488793f09..2ff4a66a3caa 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -217,20 +217,24 @@ static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
return memwidth;
}
-#define coresight_tmc_simple_func(name, offset) \
- coresight_simple_func(struct tmc_drvdata, NULL, name, offset)
-
-coresight_tmc_simple_func(rsz, TMC_RSZ);
-coresight_tmc_simple_func(sts, TMC_STS);
-coresight_tmc_simple_func(rrp, TMC_RRP);
-coresight_tmc_simple_func(rwp, TMC_RWP);
-coresight_tmc_simple_func(trg, TMC_TRG);
-coresight_tmc_simple_func(ctl, TMC_CTL);
-coresight_tmc_simple_func(ffsr, TMC_FFSR);
-coresight_tmc_simple_func(ffcr, TMC_FFCR);
-coresight_tmc_simple_func(mode, TMC_MODE);
-coresight_tmc_simple_func(pscr, TMC_PSCR);
-coresight_tmc_simple_func(devid, CORESIGHT_DEVID);
+#define coresight_tmc_reg(name, offset) \
+ coresight_simple_reg32(struct tmc_drvdata, name, offset)
+#define coresight_tmc_reg64(name, lo_off, hi_off) \
+ coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)
+
+coresight_tmc_reg(rsz, TMC_RSZ);
+coresight_tmc_reg(sts, TMC_STS);
+coresight_tmc_reg(trg, TMC_TRG);
+coresight_tmc_reg(ctl, TMC_CTL);
+coresight_tmc_reg(ffsr, TMC_FFSR);
+coresight_tmc_reg(ffcr, TMC_FFCR);
+coresight_tmc_reg(mode, TMC_MODE);
+coresight_tmc_reg(pscr, TMC_PSCR);
+coresight_tmc_reg(axictl, TMC_AXICTL);
+coresight_tmc_reg(devid, CORESIGHT_DEVID);
+coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
+coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
+coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
static struct attribute *coresight_tmc_mgmt_attrs[] = {
&dev_attr_rsz.attr,
@@ -244,6 +248,8 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = {
&dev_attr_mode.attr,
&dev_attr_pscr.attr,
&dev_attr_devid.attr,
+ &dev_attr_dba.attr,
+ &dev_attr_axictl.attr,
NULL,
};
@@ -293,6 +299,42 @@ const struct attribute_group *coresight_tmc_groups[] = {
NULL,
};
+/* Detect and initialise the capabilities of a TMC ETR */
+static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
+ u32 devid, void *dev_caps)
+{
+ u32 dma_mask = 0;
+
+ /* Set the unadvertised capabilities */
+ tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
+
+ if (!(devid & TMC_DEVID_NOSCAT))
+ tmc_etr_set_cap(drvdata, TMC_ETR_SG);
+
+ /* Check if the AXI address width is available */
+ if (devid & TMC_DEVID_AXIAW_VALID)
+ dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
+ TMC_DEVID_AXIAW_MASK);
+
+ /*
+ * Unless specified in the device configuration, ETR uses a 40-bit
+ * AXI master in place of the embedded SRAM of ETB/ETF.
+ */
+ switch (dma_mask) {
+ case 32:
+ case 40:
+ case 44:
+ case 48:
+ case 52:
+ dev_info(drvdata->dev, "Detected dma mask %d bits\n", dma_mask);
+ break;
+ default:
+ dma_mask = 40;
+ }
+
+ return dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+}
+
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
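
tmc_etr_setup_caps() above derives the DMA mask from the DEVID register. A standalone model of that extraction, using the TMC_DEVID_* definitions this patch adds to coresight-tmc.h:

#include <assert.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define TMC_DEVID_AXIAW_VALID	BIT(16)
#define TMC_DEVID_AXIAW_SHIFT	17
#define TMC_DEVID_AXIAW_MASK	0x7f

static uint32_t etr_dma_mask(uint32_t devid)
{
	uint32_t dma_mask = 0;

	if (devid & TMC_DEVID_AXIAW_VALID)
		dma_mask = (devid >> TMC_DEVID_AXIAW_SHIFT) &
			   TMC_DEVID_AXIAW_MASK;

	switch (dma_mask) {
	case 32: case 40: case 44: case 48: case 52:
		return dma_mask;	/* advertised and sane */
	default:
		return 40;		/* fall back to a 40-bit AXI master */
	}
}

int main(void)
{
	assert(etr_dma_mask(0) == 40);				/* not advertised */
	assert(etr_dma_mask(TMC_DEVID_AXIAW_VALID |
			    (44u << TMC_DEVID_AXIAW_SHIFT)) == 44);
	assert(etr_dma_mask(TMC_DEVID_AXIAW_VALID |
			    (63u << TMC_DEVID_AXIAW_SHIFT)) == 40);	/* bogus width */
	return 0;
}
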
@@ -354,25 +396,29 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = dev;
desc.groups = coresight_tmc_groups;
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etb_cs_ops;
- } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ break;
+ case TMC_CONFIG_TYPE_ETR:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etr_cs_ops;
- /*
- * ETR configuration uses a 40-bit AXI master in place of
- * the embedded SRAM of ETB/ETF.
- */
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ ret = tmc_etr_setup_caps(drvdata, devid, id->data);
if (ret)
goto out;
- } else {
+ break;
+ case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc.ops = &tmc_etf_cs_ops;
+ break;
+ default:
+ pr_err("%s: Unsupported TMC config\n", pdata->name);
+ ret = -EINVAL;
+ goto out;
}
drvdata->csdev = coresight_register(&desc);
@@ -391,11 +437,27 @@ out:
return ret;
}
-static struct amba_id tmc_ids[] = {
+static const struct amba_id tmc_ids[] = {
{
.id = 0x0003b961,
.mask = 0x0003ffff,
},
+ {
+ /* Coresight SoC 600 TMC-ETR/ETS */
+ .id = 0x000bb9e8,
+ .mask = 0x000fffff,
+ .data = (void *)(unsigned long)CORESIGHT_SOC_600_ETR_CAPS,
+ },
+ {
+ /* Coresight SoC 600 TMC-ETB */
+ .id = 0x000bb9e9,
+ .mask = 0x000fffff,
+ },
+ {
+ /* Coresight SoC 600 TMC-ETF */
+ .id = 0x000bb9ea,
+ .mask = 0x000fffff,
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 51c01851533e..8df7a813f537 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -54,11 +54,32 @@
#define TMC_STS_TMCREADY_BIT 2
#define TMC_STS_FULL BIT(0)
#define TMC_STS_TRIGGERED BIT(1)
-/* TMC_AXICTL - 0x110 */
+/*
+ * TMC_AXICTL - 0x110
+ *
+ * TMC AXICTL format for SoC-400
+ * Bits [0-1] : ProtCtrlBit0-1
+ * Bits [2-5] : CacheCtrlBits 0-3 (AXCACHE)
+ * Bit 6 : Reserved
+ * Bit 7 : ScatterGatherMode
+ * Bits [8-11] : WrBurstLen
+ * Bits [12-31] : Reserved.
+ * TMC AXICTL format for SoC-600, as above except:
+ * Bits [2-5] : AXI WCACHE
+ * Bits [16-19] : AXI RCACHE
+ * Bits [20-31] : Reserved
+ */
+#define TMC_AXICTL_CLEAR_MASK 0xfbf
+#define TMC_AXICTL_ARCACHE_MASK (0xf << 16)
+
#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
#define TMC_AXICTL_WR_BURST_16 0xF00
+/* Write-back Read and Write-allocate */
+#define TMC_AXICTL_AXCACHE_OS (0xf << 2)
+#define TMC_AXICTL_ARCACHE_OS (0xf << 16)
+
/* TMC_FFCR - 0x304 */
#define TMC_FFCR_FLUSHMAN_BIT 6
#define TMC_FFCR_EN_FMT BIT(0)
@@ -69,6 +90,12 @@
#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
+#define TMC_DEVID_NOSCAT BIT(24)
+
+#define TMC_DEVID_AXIAW_VALID BIT(16)
+#define TMC_DEVID_AXIAW_SHIFT 17
+#define TMC_DEVID_AXIAW_MASK 0x7f
+
enum tmc_config_type {
TMC_CONFIG_TYPE_ETB,
TMC_CONFIG_TYPE_ETR,
@@ -88,6 +115,24 @@ enum tmc_mem_intf_width {
TMC_MEM_INTF_WIDTH_256BITS = 8,
};
+/* TMC ETR Capability bit definitions */
+#define TMC_ETR_SG (0x1U << 0)
+/* ETR has separate read/write cache encodings */
+#define TMC_ETR_AXI_ARCACHE (0x1U << 1)
+/*
+ * TMC_ETR_SAVE_RESTORE - Values of RRP/RWP/STS.Full are
+ * retained when TMC leaves Disabled state, allowing us to continue
+ * the tracing from a point where we stopped. This also implies that
+ * RRP/RWP/STS.Full must always be programmed to the correct
+ * value. Unfortunately this is not advertised by the hardware,
+ * so we have to rely on the PID of the IP to detect the functionality.
+ */
+#define TMC_ETR_SAVE_RESTORE (0x1U << 2)
+
+/* Coresight SoC-600 TMC-ETR unadvertised capabilities */
+#define CORESIGHT_SOC_600_ETR_CAPS \
+ (TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
@@ -104,6 +149,8 @@ enum tmc_mem_intf_width {
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
* @trigger_cntr: amount of words to store after a trigger.
+ * @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
+ * device configuration register (DEVID)
*/
struct tmc_drvdata {
void __iomem *base;
@@ -121,6 +168,7 @@ struct tmc_drvdata {
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
+ u32 etr_caps;
};
/* Generic functions */
@@ -139,4 +187,39 @@ extern const struct coresight_ops tmc_etf_cs_ops;
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
+
+
+#define TMC_REG_PAIR(name, lo_off, hi_off) \
+static inline u64 \
+tmc_read_##name(struct tmc_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+} \
+static inline void \
+tmc_write_##name(struct tmc_drvdata *drvdata, u64 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+}
+
+TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI)
+TMC_REG_PAIR(rwp, TMC_RWP, TMC_RWPHI)
+TMC_REG_PAIR(dba, TMC_DBALO, TMC_DBAHI)
+
+/* Initialise the caps from unadvertised static capabilities of the device */
+static inline void tmc_etr_init_caps(struct tmc_drvdata *drvdata, u32 dev_caps)
+{
+ WARN_ON(drvdata->etr_caps);
+ drvdata->etr_caps = dev_caps;
+}
+
+static inline void tmc_etr_set_cap(struct tmc_drvdata *drvdata, u32 cap)
+{
+ drvdata->etr_caps |= cap;
+}
+
+static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
+{
+ return !!(drvdata->etr_caps & cap);
+}
+
#endif
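
For reference, one TMC_REG_PAIR() instantiation expands to a pair of trivial accessors; the rwp case:

/* TMC_REG_PAIR(rwp, TMC_RWP, TMC_RWPHI) expands to: */
static inline u64 tmc_read_rwp(struct tmc_drvdata *drvdata)
{
	return coresight_read_reg_pair(drvdata->base, TMC_RWP, TMC_RWPHI);
}

static inline void tmc_write_rwp(struct tmc_drvdata *drvdata, u64 val)
{
	coresight_write_reg_pair(drvdata->base, val, TMC_RWP, TMC_RWPHI);
}
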
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 0673baf0f2f5..d7a3e453016d 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -192,7 +192,7 @@ static const struct dev_pm_ops tpiu_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tpiu_runtime_suspend, tpiu_runtime_resume, NULL)
};
-static struct amba_id tpiu_ids[] = {
+static const struct amba_id tpiu_ids[] = {
{
.id = 0x0003b912,
.mask = 0x0003ffff,
@@ -201,6 +201,11 @@ static struct amba_id tpiu_ids[] = {
.id = 0x0004b912,
.mask = 0x0007ffff,
},
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9e7,
+ .mask = 0x000fffff,
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 6a0202b7384f..b8091bef21dc 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -53,6 +53,14 @@ static DEFINE_PER_CPU(struct list_head *, tracer_path);
*/
static struct list_head *stm_path;
+/*
+ * When losing synchronisation a new barrier packet needs to be inserted at the
+ * beginning of the data collected in a buffer. That way the decoder knows that
+ * it needs to look for another sync sequence.
+ */
+const u32 barrier_pkt[5] = {0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff, 0x0};
+
static int coresight_id_match(struct device *dev, void *data)
{
int trace_id, i_trace_id;
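
Each sink (ETB, TMC-ETF, TMC-ETR) uses barrier_pkt the same way: when the capture wrapped and data was lost, the first words handed to the decoder are replaced with the barrier sequence so it resynchronises. A standalone model of that substitution loop:

#include <assert.h>
#include <stdint.h>

static const uint32_t barrier_pkt[5] = { 0x7fffffff, 0x7fffffff,
					 0x7fffffff, 0x7fffffff, 0x0 };

/* overwrite the start of a captured buffer with the barrier sequence */
static void insert_barrier(uint32_t *buf, int lost)
{
	const uint32_t *barrier = barrier_pkt;

	while (lost && *barrier) {
		*buf++ = *barrier;
		barrier++;
	}
}

int main(void)
{
	uint32_t buf[6] = { 1, 2, 3, 4, 5, 6 };

	insert_barrier(buf, 1);
	assert(buf[0] == 0x7fffffff && buf[3] == 0x7fffffff);
	assert(buf[4] == 5 && buf[5] == 6);	/* rest of the trace intact */
	return 0;
}
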
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 8da567abc0ce..1a023e30488c 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -101,17 +101,53 @@ out_pm:
return ret;
}
+static void intel_th_device_remove(struct intel_th_device *thdev);
+
static int intel_th_remove(struct device *dev)
{
struct intel_th_driver *thdrv = to_intel_th_driver(dev->driver);
struct intel_th_device *thdev = to_intel_th_device(dev);
- struct intel_th_device *hub = to_intel_th_device(dev->parent);
+ struct intel_th_device *hub = to_intel_th_hub(thdev);
int err;
if (thdev->type == INTEL_TH_SWITCH) {
+ struct intel_th *th = to_intel_th(hub);
+ int i, lowest;
+
+ /* disconnect outputs */
err = device_for_each_child(dev, thdev, intel_th_child_remove);
if (err)
return err;
+
+ /*
+ * Remove outputs, that is, hub's children: they are created
+ * at hub's probe time by having the hub call
+ * intel_th_output_enable() for each of them.
+ */
+ for (i = 0, lowest = -1; i < th->num_thdevs; i++) {
+ /*
+ * Move the non-output devices from higher up the
+ * th->thdev[] array to lower positions to maintain
+ * a contiguous array.
+ */
+ if (th->thdev[i]->type != INTEL_TH_OUTPUT) {
+ if (lowest >= 0) {
+ th->thdev[lowest] = th->thdev[i];
+ th->thdev[i] = NULL;
+ ++lowest;
+ }
+
+ continue;
+ }
+
+ if (lowest == -1)
+ lowest = i;
+
+ intel_th_device_remove(th->thdev[i]);
+ th->thdev[i] = NULL;
+ }
+
+ th->num_thdevs = lowest;
}
if (thdrv->attr_group)
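
The removal loop above compacts th->thdev[] in place: output devices are removed and everything else slides down over the holes. A standalone model (0 = source/switch kept, 1 = output removed, -1 models a NULLed slot); note that lowest, and hence the new num_thdevs, stays -1 when no output was present:

#include <assert.h>

static int compact(int *thdev, int n)
{
	int i, lowest;

	for (i = 0, lowest = -1; i < n; i++) {
		if (thdev[i] != 1) {		/* not an output */
			if (lowest >= 0) {	/* slide down over a hole */
				thdev[lowest] = thdev[i];
				thdev[i] = -1;
				++lowest;
			}
			continue;
		}
		if (lowest == -1)
			lowest = i;		/* first hole */
		thdev[i] = -1;			/* intel_th_device_remove() */
	}
	return lowest;				/* new num_thdevs */
}

int main(void)
{
	int a[] = { 0, 1, 0, 1 };	/* switch, output, source, output */

	assert(compact(a, 4) == 2);
	assert(a[0] == 0 && a[1] == 0 && a[2] == -1 && a[3] == -1);
	return 0;
}
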
@@ -156,21 +192,6 @@ static struct device_type intel_th_source_device_type = {
.release = intel_th_device_release,
};
-static struct intel_th *to_intel_th(struct intel_th_device *thdev)
-{
- /*
- * subdevice tree is flat: if this one is not a switch, its
- * parent must be
- */
- if (thdev->type != INTEL_TH_SWITCH)
- thdev = to_intel_th_hub(thdev);
-
- if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
- return NULL;
-
- return dev_get_drvdata(thdev->dev.parent);
-}
-
static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
@@ -205,6 +226,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
to_intel_th_driver_or_null(thdev->dev.driver);
+ struct intel_th *th = to_intel_th(thdev);
int ret = 0;
if (!thdrv)
@@ -215,15 +237,28 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
pm_runtime_get_sync(&thdev->dev);
+ if (th->activate)
+ ret = th->activate(th);
+ if (ret)
+ goto fail_put;
+
if (thdrv->activate)
ret = thdrv->activate(thdev);
else
intel_th_trace_enable(thdev);
- if (ret) {
- pm_runtime_put(&thdev->dev);
- module_put(thdrv->driver.owner);
- }
+ if (ret)
+ goto fail_deactivate;
+
+ return 0;
+
+fail_deactivate:
+ if (th->deactivate)
+ th->deactivate(th);
+
+fail_put:
+ pm_runtime_put(&thdev->dev);
+ module_put(thdrv->driver.owner);
return ret;
}
@@ -232,6 +267,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
to_intel_th_driver_or_null(thdev->dev.driver);
+ struct intel_th *th = to_intel_th(thdev);
if (!thdrv)
return;
@@ -241,6 +277,9 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
else
intel_th_trace_disable(thdev);
+ if (th->deactivate)
+ th->deactivate(th);
+
pm_runtime_put(&thdev->dev);
module_put(thdrv->driver.owner);
}
@@ -326,10 +365,10 @@ intel_th_device_alloc(struct intel_th *th, unsigned int type, const char *name,
struct device *parent;
struct intel_th_device *thdev;
- if (type == INTEL_TH_SWITCH)
- parent = th->dev;
- else
+ if (type == INTEL_TH_OUTPUT)
parent = &th->hub->dev;
+ else
+ parent = th->dev;
thdev = kzalloc(sizeof(*thdev) + strlen(name) + 1, GFP_KERNEL);
if (!thdev)
@@ -392,13 +431,14 @@ static const struct intel_th_subdevice {
unsigned otype;
unsigned scrpd;
int id;
-} intel_th_subdevices[TH_SUBDEVICE_MAX] = {
+} intel_th_subdevices[] = {
{
.nres = 1,
.res = {
{
+ /* Handle TSCU from GTH driver */
.start = REG_GTH_OFFSET,
- .end = REG_GTH_OFFSET + REG_GTH_LENGTH - 1,
+ .end = REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1,
.flags = IORESOURCE_MEM,
},
},
@@ -483,6 +523,21 @@ static const struct intel_th_subdevice {
.nres = 1,
.res = {
{
+ .start = REG_PTI_OFFSET,
+ .end = REG_PTI_OFFSET + REG_PTI_LENGTH - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ },
+ .id = -1,
+ .name = "lpp",
+ .type = INTEL_TH_OUTPUT,
+ .otype = GTH_LPP,
+ .scrpd = SCRPD_PTI_IS_PRIM_DEST,
+ },
+ {
+ .nres = 1,
+ .res = {
+ {
.start = REG_DCIH_OFFSET,
.end = REG_DCIH_OFFSET + REG_DCIH_LENGTH - 1,
.flags = IORESOURCE_MEM,
@@ -526,98 +581,182 @@ static inline void intel_th_request_hub_module_flush(struct intel_th *th)
}
#endif /* CONFIG_MODULES */
-static int intel_th_populate(struct intel_th *th, struct resource *devres,
- unsigned int ndevres, int irq)
+static struct intel_th_device *
+intel_th_subdevice_alloc(struct intel_th *th,
+ const struct intel_th_subdevice *subdev)
{
+ struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
- int src, dst, err;
+ int r, err;
- /* create devices for each intel_th_subdevice */
- for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
- const struct intel_th_subdevice *subdev =
- &intel_th_subdevices[src];
- struct intel_th_device *thdev;
- int r;
+ thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
+ subdev->id);
+ if (!thdev)
+ return ERR_PTR(-ENOMEM);
- /* only allow SOURCE and SWITCH devices in host mode */
- if (host_mode && subdev->type == INTEL_TH_OUTPUT)
- continue;
+ thdev->drvdata = th->drvdata;
+
+ memcpy(res, subdev->res,
+ sizeof(struct resource) * subdev->nres);
+
+ for (r = 0; r < subdev->nres; r++) {
+ struct resource *devres = th->resource;
+ int bar = TH_MMIO_CONFIG;
+
+ /*
+ * Take .end == 0 to mean 'take the whole bar',
+ * .start then tells us which bar it is. Default to
+ * TH_MMIO_CONFIG.
+ */
+ if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
+ bar = res[r].start;
+ res[r].start = 0;
+ res[r].end = resource_size(&devres[bar]) - 1;
+ }
+
+ if (res[r].flags & IORESOURCE_MEM) {
+ res[r].start += devres[bar].start;
+ res[r].end += devres[bar].start;
- thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
- subdev->id);
- if (!thdev) {
- err = -ENOMEM;
- goto kill_subdevs;
+ dev_dbg(th->dev, "%s:%d @ %pR\n",
+ subdev->name, r, &res[r]);
+ } else if (res[r].flags & IORESOURCE_IRQ) {
+ res[r].start = th->irq;
}
+ }
- memcpy(res, subdev->res,
- sizeof(struct resource) * subdev->nres);
+ err = intel_th_device_add_resources(thdev, res, subdev->nres);
+ if (err) {
+ put_device(&thdev->dev);
+ goto fail_put_device;
+ }
- for (r = 0; r < subdev->nres; r++) {
- int bar = TH_MMIO_CONFIG;
+ if (subdev->type == INTEL_TH_OUTPUT) {
+ thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
+ thdev->output.type = subdev->otype;
+ thdev->output.port = -1;
+ thdev->output.scratchpad = subdev->scrpd;
+ } else if (subdev->type == INTEL_TH_SWITCH) {
+ thdev->host_mode = host_mode;
+ th->hub = thdev;
+ }
- /*
- * Take .end == 0 to mean 'take the whole bar',
- * .start then tells us which bar it is. Default to
- * TH_MMIO_CONFIG.
- */
- if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
- bar = res[r].start;
- res[r].start = 0;
- res[r].end = resource_size(&devres[bar]) - 1;
- }
+ err = device_add(&thdev->dev);
+ if (err) {
+ put_device(&thdev->dev);
+ goto fail_free_res;
+ }
- if (res[r].flags & IORESOURCE_MEM) {
- res[r].start += devres[bar].start;
- res[r].end += devres[bar].start;
+ /* need switch driver to be loaded to enumerate the rest */
+ if (subdev->type == INTEL_TH_SWITCH && !req) {
+ err = intel_th_request_hub_module(th);
+ if (!err)
+ req++;
+ }
- dev_dbg(th->dev, "%s:%d @ %pR\n",
- subdev->name, r, &res[r]);
- } else if (res[r].flags & IORESOURCE_IRQ) {
- res[r].start = irq;
- }
- }
+ return thdev;
- err = intel_th_device_add_resources(thdev, res, subdev->nres);
- if (err) {
- put_device(&thdev->dev);
- goto kill_subdevs;
- }
+fail_free_res:
+ kfree(thdev->resource);
- if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, dst);
- thdev->output.type = subdev->otype;
- thdev->output.port = -1;
- thdev->output.scratchpad = subdev->scrpd;
- } else if (subdev->type == INTEL_TH_SWITCH) {
- thdev->host_mode = host_mode;
- }
+fail_put_device:
+ put_device(&thdev->dev);
+
+ return ERR_PTR(err);
+}
- err = device_add(&thdev->dev);
- if (err) {
- put_device(&thdev->dev);
- goto kill_subdevs;
+/**
+ * intel_th_output_enable() - find and enable a device for a given output type
+ * @th: Intel TH instance
+ * @otype: output type
+ *
+ * Go through the unallocated output devices, find the first one whose type
+ * matches @otype and instantiate it. These devices are removed when the hub
+ * device is removed, see intel_th_remove().
+ */
+int intel_th_output_enable(struct intel_th *th, unsigned int otype)
+{
+ struct intel_th_device *thdev;
+ int src = 0, dst = 0;
+
+ for (src = 0, dst = 0; dst <= th->num_thdevs; src++, dst++) {
+ for (; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+ if (intel_th_subdevices[src].type != INTEL_TH_OUTPUT)
+ continue;
+
+ if (intel_th_subdevices[src].otype != otype)
+ continue;
+
+ break;
}
- /* need switch driver to be loaded to enumerate the rest */
- if (subdev->type == INTEL_TH_SWITCH && !req) {
- th->hub = thdev;
- err = intel_th_request_hub_module(th);
- if (!err)
- req++;
+ /* no unallocated matching subdevices */
+ if (src == ARRAY_SIZE(intel_th_subdevices))
+ return -ENODEV;
+
+ for (; dst < th->num_thdevs; dst++) {
+ if (th->thdev[dst]->type != INTEL_TH_OUTPUT)
+ continue;
+
+ if (th->thdev[dst]->output.type != otype)
+ continue;
+
+ break;
}
- th->thdev[dst++] = thdev;
+ /*
+ * intel_th_subdevices[src] matches our requirements and is
+ * not matched in th::thdev[]
+ */
+ if (dst == th->num_thdevs)
+ goto found;
}
+ return -ENODEV;
+
+found:
+ thdev = intel_th_subdevice_alloc(th, &intel_th_subdevices[src]);
+ if (IS_ERR(thdev))
+ return PTR_ERR(thdev);
+
+ th->thdev[th->num_thdevs++] = thdev;
+
return 0;
+}
+EXPORT_SYMBOL_GPL(intel_th_output_enable);
+
+static int intel_th_populate(struct intel_th *th)
+{
+ int src;
+
+ /* create devices for each intel_th_subdevice */
+ for (src = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+ const struct intel_th_subdevice *subdev =
+ &intel_th_subdevices[src];
+ struct intel_th_device *thdev;
+
+ /* only allow SOURCE and SWITCH devices in host mode */
+ if (host_mode && subdev->type == INTEL_TH_OUTPUT)
+ continue;
-kill_subdevs:
- for (; dst >= 0; dst--)
- intel_th_device_remove(th->thdev[dst]);
+ /*
+ * don't enable port OUTPUTs in this path; SWITCH enables them
+ * via intel_th_output_enable()
+ */
+ if (subdev->type == INTEL_TH_OUTPUT &&
+ subdev->otype != GTH_NONE)
+ continue;
+
+ thdev = intel_th_subdevice_alloc(th, subdev);
+ /* note: caller should free subdevices from th::thdev[] */
+ if (IS_ERR(thdev))
+ return PTR_ERR(thdev);
+
+ th->thdev[th->num_thdevs++] = thdev;
+ }
- return err;
+ return 0;
}
static int match_devt(struct device *dev, void *data)
@@ -670,8 +809,8 @@ static const struct file_operations intel_th_output_fops = {
* @irq: irq number
*/
struct intel_th *
-intel_th_alloc(struct device *dev, struct resource *devres,
- unsigned int ndevres, int irq)
+intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+ struct resource *devres, unsigned int ndevres, int irq)
{
struct intel_th *th;
int err;
@@ -693,6 +832,11 @@ intel_th_alloc(struct device *dev, struct resource *devres,
goto err_ida;
}
th->dev = dev;
+ th->drvdata = drvdata;
+
+ th->resource = devres;
+ th->num_resources = ndevres;
+ th->irq = irq;
dev_set_drvdata(dev, th);
@@ -700,18 +844,15 @@ intel_th_alloc(struct device *dev, struct resource *devres,
pm_runtime_put(dev);
pm_runtime_allow(dev);
- err = intel_th_populate(th, devres, ndevres, irq);
- if (err)
- goto err_chrdev;
+ err = intel_th_populate(th);
+ if (err) {
+ /* free the subdevices and undo everything */
+ intel_th_free(th);
+ return ERR_PTR(err);
+ }
return th;
-err_chrdev:
- pm_runtime_forbid(dev);
-
- __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
- "intel_th/output");
-
err_ida:
ida_simple_remove(&intel_th_ida, th->id);
@@ -727,11 +868,15 @@ void intel_th_free(struct intel_th *th)
int i;
intel_th_request_hub_module_flush(th);
- for (i = 0; i < TH_SUBDEVICE_MAX; i++)
- if (th->thdev[i] && th->thdev[i] != th->hub)
- intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
+ for (i = 0; i < th->num_thdevs; i++) {
+ if (th->thdev[i] != th->hub)
+ intel_th_device_remove(th->thdev[i]);
+ th->thdev[i] = NULL;
+ }
+
+ th->num_thdevs = 0;
pm_runtime_get_sync(th->dev);
pm_runtime_forbid(th->dev);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index dd32d0bad687..018678ec3c13 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -285,16 +285,16 @@ gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
*/
static int intel_th_gth_reset(struct gth_device *gth)
{
- u32 scratchpad;
+ u32 reg;
int port, i;
- scratchpad = ioread32(gth->base + REG_GTH_SCRPD0);
- if (scratchpad & SCRPD_DEBUGGER_IN_USE)
+ reg = ioread32(gth->base + REG_GTH_SCRPD0);
+ if (reg & SCRPD_DEBUGGER_IN_USE)
return -EBUSY;
/* Always save/restore STH and TU registers in S0ix entry/exit */
- scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
- iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+ reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+ iowrite32(reg, gth->base + REG_GTH_SCRPD0);
/* output ports */
for (port = 0; port < 8; port++) {
@@ -512,6 +512,15 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}
+static void gth_tscu_resync(struct gth_device *gth)
+{
+ u32 reg;
+
+ reg = ioread32(gth->base + REG_TSCU_TSUCTRL);
+ reg &= ~TSUCTRL_CTCRESYNC;
+ iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
+}
+
/**
* intel_th_gth_enable() - enable tracing to an output device
* @thdev: GTH device
@@ -524,6 +533,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
struct intel_th_output *output)
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
+ struct intel_th *th = to_intel_th(thdev);
u32 scr = 0xfc0000, scrpd;
int master;
@@ -539,6 +549,9 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
output->active = true;
spin_unlock(&gth->gth_lock);
+ if (INTEL_TH_CAP(th, tscu_enable))
+ gth_tscu_resync(gth);
+
scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
scrpd |= output->scratchpad;
iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
@@ -639,6 +652,7 @@ intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
static int intel_th_gth_probe(struct intel_th_device *thdev)
{
struct device *dev = &thdev->dev;
+ struct intel_th *th = dev_get_drvdata(dev->parent);
struct gth_device *gth;
struct resource *res;
void __iomem *base;
@@ -660,6 +674,8 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
gth->base = base;
spin_lock_init(&gth->gth_lock);
+ dev_set_drvdata(dev, gth);
+
/*
* Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
* bit. Either way, don't reset HW in this case, and don't export any
@@ -667,7 +683,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
* drivers to ports, see intel_th_gth_assign().
*/
if (thdev->host_mode)
- goto done;
+ return 0;
ret = intel_th_gth_reset(gth);
if (ret) {
@@ -676,7 +692,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
thdev->host_mode = true;
- goto done;
+ return 0;
}
for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
@@ -687,6 +703,13 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
gth->output[i].index = i;
gth->output[i].port_type =
gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
+ if (gth->output[i].port_type == GTH_NONE)
+ continue;
+
+ ret = intel_th_output_enable(th, gth->output[i].port_type);
+ /* -ENODEV is ok, we just won't have that device enumerated */
+ if (ret && ret != -ENODEV)
+ return ret;
}
if (intel_th_output_attributes(gth) ||
@@ -698,9 +721,6 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
return -ENOMEM;
}
-done:
- dev_set_drvdata(dev, gth);
-
return 0;
}
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 56f0d2620577..f3d234251a12 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -55,9 +55,14 @@ enum {
REG_GTH_SCRPD1 = 0xe4, /* ScratchPad[1] */
REG_GTH_SCRPD2 = 0xe8, /* ScratchPad[2] */
REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
+ REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */
+ REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */
};
/* waiting for Pipeline Empty bit(s) to assert for GTH */
#define GTH_PLE_WAITLOOP_DEPTH 10000
+#define TSUCTRL_CTCRESYNC BIT(0)
+#define TSCUSTAT_CTCSYNCING BIT(1)
+
#endif /* __INTEL_TH_GTH_H__ */
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 3096e7054f6d..99ad563fc40d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -48,8 +48,19 @@ struct intel_th_output {
};
/**
+ * struct intel_th_drvdata - describes hardware capabilities and quirks
+ * @tscu_enable: device needs SW to enable time stamping unit
+ */
+struct intel_th_drvdata {
+ unsigned int tscu_enable : 1;
+};
+
+#define INTEL_TH_CAP(_th, _cap) ((_th)->drvdata ? (_th)->drvdata->_cap : 0)
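The macro above makes capability checks safe on devices that ship no
drvdata: a NULL pointer simply reports every capability as absent. A tiny
stand-alone illustration of the same pattern (names mirror the header, but
this is not the driver's code):

#include <stdio.h>

struct drvdata { unsigned int tscu_enable : 1; };
struct th { const struct drvdata *drvdata; };

#define CAP(_th, _cap) ((_th)->drvdata ? (_th)->drvdata->_cap : 0)

int main(void)
{
	static const struct drvdata gen2 = { .tscu_enable = 1 };
	struct th old_th = { NULL }, new_th = { &gen2 };

	printf("legacy: %u, gen2: %u\n",
	       CAP(&old_th, tscu_enable), CAP(&new_th, tscu_enable));
	return 0;
}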
+
+/**
* struct intel_th_device - device on the intel_th bus
* @dev: device
+ * @drvdata: hardware capabilities/quirks
* @resource: array of resources available to this device
* @num_resources: number of resources in @resource array
* @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH}
@@ -59,11 +70,12 @@ struct intel_th_output {
* @name: device name to match the driver
*/
struct intel_th_device {
- struct device dev;
- struct resource *resource;
- unsigned int num_resources;
- unsigned int type;
- int id;
+ struct device dev;
+ struct intel_th_drvdata *drvdata;
+ struct resource *resource;
+ unsigned int num_resources;
+ unsigned int type;
+ int id;
/* INTEL_TH_SWITCH specific */
bool host_mode;
@@ -96,6 +108,17 @@ intel_th_device_get_resource(struct intel_th_device *thdev, unsigned int type,
return NULL;
}
+/*
+ * GTH, output ports configuration
+ */
+enum {
+ GTH_NONE = 0,
+ GTH_MSU, /* memory/usb */
+ GTH_CTP, /* Common Trace Port */
+ GTH_LPP, /* Low Power Path */
+ GTH_PTI, /* MIPI-PTI */
+};
+
/**
* intel_th_output_assigned() - if an output device is assigned to a switch port
* @thdev: the output device
@@ -106,7 +129,8 @@ static inline bool
intel_th_output_assigned(struct intel_th_device *thdev)
{
return thdev->type == INTEL_TH_OUTPUT &&
- thdev->output.port >= 0;
+ (thdev->output.port >= 0 ||
+ thdev->output.type == GTH_NONE);
}
/**
@@ -161,8 +185,18 @@ struct intel_th_driver {
#define to_intel_th_driver_or_null(_d) \
((_d) ? to_intel_th_driver(_d) : NULL)
+/*
+ * Subdevice tree structure is as follows:
+ * + struct intel_th device (pci; dev_{get,set}_drvdata())
+ * + struct intel_th_device INTEL_TH_SWITCH (GTH)
+ * + struct intel_th_device INTEL_TH_OUTPUT (MSU, PTI)
+ * + struct intel_th_device INTEL_TH_SOURCE (STH)
+ *
+ * In other words, INTEL_TH_OUTPUT devices are children of INTEL_TH_SWITCH;
+ * INTEL_TH_SWITCH and INTEL_TH_SOURCE are children of the intel_th device.
+ */
static inline struct intel_th_device *
-to_intel_th_hub(struct intel_th_device *thdev)
+to_intel_th_parent(struct intel_th_device *thdev)
{
struct device *parent = thdev->dev.parent;
@@ -172,9 +206,20 @@ to_intel_th_hub(struct intel_th_device *thdev)
return to_intel_th_device(parent);
}
+static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+ if (thdev->type == INTEL_TH_OUTPUT)
+ thdev = to_intel_th_parent(thdev);
+
+ if (WARN_ON_ONCE(!thdev || thdev->type == INTEL_TH_OUTPUT))
+ return NULL;
+
+ return dev_get_drvdata(thdev->dev.parent);
+}
+
struct intel_th *
-intel_th_alloc(struct device *dev, struct resource *devres,
- unsigned int ndevres, int irq);
+intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+ struct resource *devres, unsigned int ndevres, int irq);
void intel_th_free(struct intel_th *th);
int intel_th_driver_register(struct intel_th_driver *thdrv);
@@ -184,6 +229,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev);
int intel_th_trace_disable(struct intel_th_device *thdev);
int intel_th_set_output(struct intel_th_device *thdev,
unsigned int master);
+int intel_th_output_enable(struct intel_th *th, unsigned int otype);
enum {
TH_MMIO_CONFIG = 0,
@@ -191,8 +237,9 @@ enum {
TH_MMIO_END,
};
-#define TH_SUBDEVICE_MAX 6
#define TH_POSSIBLE_OUTPUTS 8
+/* Total number of possible subdevices: outputs + GTH + STH */
+#define TH_SUBDEVICE_MAX (TH_POSSIBLE_OUTPUTS + 2)
#define TH_CONFIGURABLE_MASTERS 256
#define TH_MSC_MAX 2
@@ -201,6 +248,10 @@ enum {
* @dev: driver core's device
* @thdev: subdevices
* @hub: "switch" subdevice (GTH)
+ * @resource: resources of the entire controller
+ * @num_thdevs: number of devices in the @thdev array
+ * @num_resources: number of resources in the @resource array
+ * @irq: irq number
* @id: this Intel TH controller's device ID in the system
* @major: device node major for output devices
*/
@@ -209,6 +260,14 @@ struct intel_th {
struct intel_th_device *thdev[TH_SUBDEVICE_MAX];
struct intel_th_device *hub;
+ struct intel_th_drvdata *drvdata;
+
+ struct resource *resource;
+ int (*activate)(struct intel_th *);
+ void (*deactivate)(struct intel_th *);
+ unsigned int num_thdevs;
+ unsigned int num_resources;
+ int irq;
int id;
int major;
@@ -220,6 +279,17 @@ struct intel_th {
#endif
};
+static inline struct intel_th_device *
+to_intel_th_hub(struct intel_th_device *thdev)
+{
+ if (thdev->type == INTEL_TH_SWITCH)
+ return thdev;
+ else if (thdev->type == INTEL_TH_OUTPUT)
+ return to_intel_th_parent(thdev);
+
+ return to_intel_th(thdev)->hub;
+}
+
/*
* Register windows
*/
@@ -228,6 +298,10 @@ enum {
REG_GTH_OFFSET = 0x0000,
REG_GTH_LENGTH = 0x2000,
+ /* Timestamp counter unit (TSCU) */
+ REG_TSCU_OFFSET = 0x2000,
+ REG_TSCU_LENGTH = 0x1000,
+
/* Software Trace Hub (STH) [0x4000..0x4fff] */
REG_STH_OFFSET = 0x4000,
REG_STH_LENGTH = 0x2000,
@@ -250,16 +324,6 @@ enum {
};
/*
- * GTH, output ports configuration
- */
-enum {
- GTH_NONE = 0,
- GTH_MSU, /* memory/usb */
- GTH_CTP, /* Common Trace Port */
- GTH_PTI = 4, /* MIPI-PTI */
-};
-
-/*
* Scratchpad bits: tell firmware and external debuggers
* what we are up to.
*/
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index dbbe31df74df..dfb57eaa9f22 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -709,17 +709,17 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
}
for (i = 0; i < nr_blocks; i++) {
- win->block[i].bdesc = dma_alloc_coherent(msc_dev(msc), size,
- &win->block[i].addr,
- GFP_KERNEL);
+ win->block[i].bdesc =
+ dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
+ &win->block[i].addr, GFP_KERNEL);
+
+ if (!win->block[i].bdesc)
+ goto err_nomem;
#ifdef CONFIG_X86
/* Set the page as uncached */
set_memory_uc((unsigned long)win->block[i].bdesc, 1);
#endif
-
- if (!win->block[i].bdesc)
- goto err_nomem;
}
win->msc = msc;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 590cf90dd21a..bc9cebc30526 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -27,9 +27,53 @@
#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW))
+#define PCI_REG_NPKDSC 0x80
+#define NPKDSC_TSACT BIT(5)
+
+static int intel_th_pci_activate(struct intel_th *th)
+{
+ struct pci_dev *pdev = to_pci_dev(th->dev);
+ u32 npkdsc;
+ int err;
+
+ if (!INTEL_TH_CAP(th, tscu_enable))
+ return 0;
+
+ err = pci_read_config_dword(pdev, PCI_REG_NPKDSC, &npkdsc);
+ if (!err) {
+ npkdsc |= NPKDSC_TSACT;
+ err = pci_write_config_dword(pdev, PCI_REG_NPKDSC, npkdsc);
+ }
+
+ if (err)
+ dev_err(&pdev->dev, "failed to read/write NPKDSC register\n");
+
+ return err;
+}
+
+static void intel_th_pci_deactivate(struct intel_th *th)
+{
+ struct pci_dev *pdev = to_pci_dev(th->dev);
+ u32 npkdsc;
+ int err;
+
+ if (!INTEL_TH_CAP(th, tscu_enable))
+ return;
+
+ err = pci_read_config_dword(pdev, PCI_REG_NPKDSC, &npkdsc);
+ if (!err) {
+ npkdsc &= ~NPKDSC_TSACT;
+ err = pci_write_config_dword(pdev, PCI_REG_NPKDSC, npkdsc);
+ }
+
+ if (err)
+ dev_err(&pdev->dev, "failed to read/write NPKDSC register\n");
+}
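Activate and deactivate form a read-modify-write pair over the same PCI
config dword: the first sets NPKDSC_TSACT, the second clears it. A
stand-alone model of that set/clear pattern, with a plain variable standing
in for the config register:

#include <stdio.h>

#define TSACT (1u << 5)

static unsigned int npkdsc;	/* stands in for the NPKDSC config dword */

static void activate(void)   { npkdsc |=  TSACT; }
static void deactivate(void) { npkdsc &= ~TSACT; }

int main(void)
{
	activate();
	printf("after activate:   %#x\n", npkdsc);
	deactivate();
	printf("after deactivate: %#x\n", npkdsc);
	return 0;
}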
+
static int intel_th_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct intel_th_drvdata *drvdata = (void *)id->driver_data;
struct intel_th *th;
int err;
@@ -41,11 +85,16 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- th = intel_th_alloc(&pdev->dev, pdev->resource,
+ th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
DEVICE_COUNT_RESOURCE, pdev->irq);
if (IS_ERR(th))
return PTR_ERR(th);
+ th->activate = intel_th_pci_activate;
+ th->deactivate = intel_th_pci_deactivate;
+
+ pci_set_master(pdev);
+
return 0;
}
@@ -56,6 +105,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
intel_th_free(th);
}
+static const struct intel_th_drvdata intel_th_2x = {
+ .tscu_enable = 1,
+};
+
static const struct pci_device_id intel_th_pci_id_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9d26),
@@ -93,7 +146,17 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
{
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
- .driver_data = (kernel_ulong_t)0,
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Cannon Lake H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Cannon Lake LP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 35738b5bfccd..e96a1fcb57b2 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -1,7 +1,7 @@
/*
* Intel(R) Trace Hub PTI output driver
*
- * Copyright (C) 2014-2015 Intel Corporation.
+ * Copyright (C) 2014-2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,8 @@ struct pti_device {
unsigned int freeclk;
unsigned int clkdiv;
unsigned int patgen;
+ unsigned int lpp_dest_mask;
+ unsigned int lpp_dest;
};
/* map PTI widths to MODE settings of PTI_CTL register */
@@ -163,6 +165,7 @@ static int intel_th_pti_activate(struct intel_th_device *thdev)
ctl |= PTI_FCEN;
ctl |= pti->mode << __ffs(PTI_MODE);
ctl |= pti->clkdiv << __ffs(PTI_CLKDIV);
+ ctl |= pti->lpp_dest << __ffs(LPP_DEST);
iowrite32(ctl, pti->base + REG_PTI_CTL);
@@ -192,6 +195,15 @@ static void read_hw_config(struct pti_device *pti)
pti->mode = pti_width_mode(4);
if (!pti->clkdiv)
pti->clkdiv = 1;
+
+ if (pti->thdev->output.type == GTH_LPP) {
+ if (ctl & LPP_PTIPRESENT)
+ pti->lpp_dest_mask |= LPP_DEST_PTI;
+ if (ctl & LPP_BSSBPRESENT)
+ pti->lpp_dest_mask |= LPP_DEST_EXI;
+ if (ctl & LPP_DEST)
+ pti->lpp_dest = 1;
+ }
}
static int intel_th_pti_probe(struct intel_th_device *thdev)
@@ -239,10 +251,103 @@ static struct intel_th_driver intel_th_pti_driver = {
},
};
-module_driver(intel_th_pti_driver,
- intel_th_driver_register,
- intel_th_driver_unregister);
+static const char * const lpp_dest_str[] = { "pti", "exi" };
+
+static ssize_t lpp_dest_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pti_device *pti = dev_get_drvdata(dev);
+ ssize_t ret = 0;
+ int i;
+
+ for (i = ARRAY_SIZE(lpp_dest_str) - 1; i >= 0; i--) {
+ const char *fmt = pti->lpp_dest == i ? "[%s] " : "%s ";
+
+ if (!(pti->lpp_dest_mask & BIT(i)))
+ continue;
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ fmt, lpp_dest_str[i]);
+ }
+
+ if (ret)
+ buf[ret - 1] = '\n';
+
+ return ret;
+}
+
+static ssize_t lpp_dest_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct pti_device *pti = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++)
+ if (sysfs_streq(buf, lpp_dest_str[i]))
+ break;
+
+ if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) {
+ pti->lpp_dest = i;
+ ret = size;
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(lpp_dest);
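The show() method renders the available destinations with the current
selection bracketed, e.g. "exi [pti]". A user-space model of that formatting
loop (snprintf stands in for the kernel's scnprintf; the data is invented
for illustration):

#include <stdio.h>
#include <stddef.h>

static const char * const dest_str[] = { "pti", "exi" };

static int show(char *buf, size_t len, unsigned int mask, int cur)
{
	int n = sizeof(dest_str) / sizeof(dest_str[0]);
	int ret = 0, i;

	for (i = n - 1; i >= 0; i--) {
		const char *fmt = (cur == i) ? "[%s] " : "%s ";

		if (!(mask & (1u << i)))
			continue;	/* destination not wired up */

		ret += snprintf(buf + ret, len - ret, fmt, dest_str[i]);
	}

	if (ret)
		buf[ret - 1] = '\n';	/* turn the trailing space into \n */

	return ret;
}

int main(void)
{
	char buf[64];

	show(buf, sizeof(buf), 0x3 /* both present */, 0 /* pti selected */);
	fputs(buf, stdout);		/* prints: exi [pti] */
	return 0;
}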
+
+static struct attribute *lpp_output_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_freerunning_clock.attr,
+ &dev_attr_clock_divider.attr,
+ &dev_attr_lpp_dest.attr,
+ NULL,
+};
+
+static struct attribute_group lpp_output_group = {
+ .attrs = lpp_output_attrs,
+};
+
+static struct intel_th_driver intel_th_lpp_driver = {
+ .probe = intel_th_pti_probe,
+ .remove = intel_th_pti_remove,
+ .activate = intel_th_pti_activate,
+ .deactivate = intel_th_pti_deactivate,
+ .attr_group = &lpp_output_group,
+ .driver = {
+ .name = "lpp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init intel_th_pti_lpp_init(void)
+{
+ int err;
+
+ err = intel_th_driver_register(&intel_th_pti_driver);
+ if (err)
+ return err;
+
+ err = intel_th_driver_register(&intel_th_lpp_driver);
+ if (err) {
+ intel_th_driver_unregister(&intel_th_pti_driver);
+ return err;
+ }
+
+ return 0;
+}
+
+module_init(intel_th_pti_lpp_init);
+
+static void __exit intel_th_pti_lpp_exit(void)
+{
+ intel_th_driver_unregister(&intel_th_pti_driver);
+ intel_th_driver_unregister(&intel_th_lpp_driver);
+}
+
+module_exit(intel_th_pti_lpp_exit);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Intel(R) Trace Hub PTI output driver");
+MODULE_DESCRIPTION("Intel(R) Trace Hub PTI/LPP output driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index 20883f5628cf..30827be67b4c 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -23,7 +23,15 @@ enum {
#define PTI_EN BIT(0)
#define PTI_FCEN BIT(1)
#define PTI_MODE 0xf0
+#define LPP_PTIPRESENT BIT(8)
+#define LPP_BSSBPRESENT BIT(9)
#define PTI_CLKDIV 0x000f0000
#define PTI_PATGENMODE 0x00f00000
+#define LPP_DEST BIT(25)
+#define LPP_BSSBACT BIT(30)
+#define LPP_LPPBUSY BIT(31)
+
+#define LPP_DEST_PTI BIT(0)
+#define LPP_DEST_EXI BIT(1)
#endif /* __INTEL_TH_STH_H__ */
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 0e731143f6a4..9414900575d8 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -566,7 +566,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (copy_from_user(&size, arg, sizeof(size)))
return -EFAULT;
- if (size >= PATH_MAX + sizeof(*id))
+ if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
return -EINVAL;
/*
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index d072c088ce73..945091a88354 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -114,6 +114,12 @@ static struct resource sc24_fpga_resource = {
.flags = IORESOURCE_MEM,
};
+static struct resource sc31_fpga_resource = {
+ .start = 0xf000e000,
+ .end = 0xf000e000 + CHAM_HEADER_SIZE,
+ .flags = IORESOURCE_MEM,
+};
+
static struct platform_driver mcb_lpc_driver = {
.driver = {
.name = "mcb-lpc",
@@ -132,6 +138,15 @@ static const struct dmi_system_id mcb_lpc_dmi_table[] = {
.driver_data = (void *)&sc24_fpga_resource,
.callback = mcb_lpc_create_platform_device,
},
+ {
+ .ident = "SC31",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
+ },
+ .driver_data = (void *)&sc31_fpga_resource,
+ .callback = mcb_lpc_create_platform_device,
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index ee7fb6ec96bd..7369bda3442f 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -182,7 +182,7 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
int num_cells = 0;
uint32_t dtype;
int bar_count;
- int ret = 0;
+ int ret;
u32 hsize;
hsize = sizeof(struct chameleon_fpga_header);
@@ -210,8 +210,10 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
header->filename);
bar_count = chameleon_get_bar(&p, mapbase, &cb);
- if (bar_count < 0)
+ if (bar_count < 0) {
+ ret = bar_count;
goto free_header;
+ }
for_each_chameleon_cell(dtype, p) {
switch (dtype) {
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b0b766416306..d84819dc2468 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -60,6 +60,7 @@ lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_refcount.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index c6cc3dc8ae1f..d8ac036f01ab 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -197,7 +197,7 @@ static struct attribute *mid_att_als[] = {
NULL
};
-static struct attribute_group m_als_gr = {
+static const struct attribute_group m_als_gr = {
.name = "apds9802als",
.attrs = mid_att_als
};
@@ -298,7 +298,7 @@ static UNIVERSAL_DEV_PM_OPS(apds9802als_pm_ops, apds9802als_suspend,
#define APDS9802ALS_PM_OPS NULL
#endif /* CONFIG_PM */
-static struct i2c_device_id apds9802als_id[] = {
+static const struct i2c_device_id apds9802als_id[] = {
{ DRIVER_NAME, 0 },
{ }
};
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 84e5b949399e..c9f07032c2fc 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1051,7 +1051,7 @@ static struct attribute *sysfs_attrs_ctrl[] = {
NULL
};
-static struct attribute_group apds990x_attribute_group[] = {
+static const struct attribute_group apds990x_attribute_group[] = {
{.attrs = sysfs_attrs_ctrl },
};
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c
index 593905565b74..cb78c98bc78d 100644
--- a/drivers/misc/aspeed-lpc-snoop.c
+++ b/drivers/misc/aspeed-lpc-snoop.c
@@ -20,6 +20,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -51,6 +52,13 @@
#define HICRB_ENSNP0D BIT(14)
#define HICRB_ENSNP1D BIT(15)
+struct aspeed_lpc_snoop_model_data {
+ /* The ast2400 has bits 14 and 15 as reserved, whereas the ast2500
+ * can use them.
+ */
+ unsigned int has_hicrb_ensnp;
+};
+
struct aspeed_lpc_snoop {
struct regmap *regmap;
int irq;
@@ -123,10 +131,13 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
}
static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
- int channel, u16 lpc_port)
+ struct device *dev,
+ int channel, u16 lpc_port)
{
int rc = 0;
u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
+ const struct aspeed_lpc_snoop_model_data *model_data =
+ of_device_get_match_data(dev);
/* Create FIFO datastructure */
rc = kfifo_alloc(&lpc_snoop->snoop_fifo[channel],
@@ -155,7 +166,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask,
lpc_port << snpwadr_shift);
- regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en);
+ if (model_data->has_hicrb_ensnp)
+ regmap_update_bits(lpc_snoop->regmap, HICRB,
+ hicrb_en, hicrb_en);
return rc;
}
@@ -213,14 +226,14 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
if (rc)
return rc;
- rc = aspeed_lpc_enable_snoop(lpc_snoop, 0, port);
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
if (rc)
return rc;
/* Configuration of 2nd snoop channel port is optional */
if (of_property_read_u32_index(dev->of_node, "snoop-ports",
1, &port) == 0) {
- rc = aspeed_lpc_enable_snoop(lpc_snoop, 1, port);
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
if (rc)
aspeed_lpc_disable_snoop(lpc_snoop, 0);
}
@@ -239,8 +252,19 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
return 0;
}
+static const struct aspeed_lpc_snoop_model_data ast2400_model_data = {
+ .has_hicrb_ensnp = 0,
+};
+
+static const struct aspeed_lpc_snoop_model_data ast2500_model_data = {
+ .has_hicrb_ensnp = 1,
+};
+
static const struct of_device_id aspeed_lpc_snoop_match[] = {
- { .compatible = "aspeed,ast2500-lpc-snoop" },
+ { .compatible = "aspeed,ast2400-lpc-snoop",
+ .data = &ast2400_model_data },
+ { .compatible = "aspeed,ast2500-lpc-snoop",
+ .data = &ast2500_model_data },
{ },
};
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 38fcfe219d1c..9c62bf064f77 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1175,7 +1175,7 @@ static struct attribute *sysfs_attrs[] = {
NULL
};
-static struct attribute_group bh1770_attribute_group = {
+static const struct attribute_group bh1770_attribute_group = {
.attrs = sysfs_attrs
};
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 28bb495f0cf1..7231260ac287 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -173,7 +173,7 @@ static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ds1682_eeprom_attr = {
+static const struct bin_attribute ds1682_eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO | S_IWUSR,
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2fad790db3bf..60e3d91b5e03 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -114,7 +114,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute eeprom_attr = {
+static const struct bin_attribute eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 94cc035aa841..38766968bfa2 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -377,8 +377,6 @@ static int eeprom_93xx46_probe_dt(struct spi_device *spi)
struct device_node *np = spi->dev.of_node;
struct eeprom_93xx46_platform_data *pd;
u32 tmp;
- int gpio;
- enum of_gpio_flags of_flags;
int ret;
pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
@@ -403,22 +401,14 @@ static int eeprom_93xx46_probe_dt(struct spi_device *spi)
if (of_property_read_bool(np, "read-only"))
pd->flags |= EE_READONLY;
- gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags);
- if (gpio_is_valid(gpio)) {
- unsigned long flags =
- of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0;
+ pd->select = devm_gpiod_get_optional(&spi->dev, "select",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pd->select))
+ return PTR_ERR(pd->select);
- ret = devm_gpio_request_one(&spi->dev, gpio, flags,
- "eeprom_93xx46_select");
- if (ret)
- return ret;
-
- pd->select = gpio_to_desc(gpio);
- pd->prepare = select_assert;
- pd->finish = select_deassert;
-
- gpiod_direction_output(pd->select, 0);
- }
+ pd->prepare = select_assert;
+ pd->finish = select_deassert;
+ gpiod_direction_output(pd->select, 0);
if (of_id->data) {
const struct eeprom_93xx46_devtype_data *data = of_id->data;
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index ab0df6a17690..34a5a41578d7 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -77,7 +77,7 @@
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/i2c.h>
#include <linux/pci_ids.h>
#include <linux/delay.h>
@@ -1089,101 +1089,85 @@ static void idt_set_defval(struct idt_89hpesx_dev *pdev)
pdev->eeaddr = 0;
}
-#ifdef CONFIG_OF
static const struct i2c_device_id ee_ids[];
+
/*
* idt_ee_match_id() - check whether the node belongs to compatible EEPROMs
*/
-static const struct i2c_device_id *idt_ee_match_id(struct device_node *node)
+static const struct i2c_device_id *idt_ee_match_id(struct fwnode_handle *fwnode)
{
const struct i2c_device_id *id = ee_ids;
+ const char *compatible, *p;
char devname[I2C_NAME_SIZE];
+ int ret;
- /* Retrieve the device name without manufacturer name */
- if (of_modalias_node(node, devname, sizeof(devname)))
+ ret = fwnode_property_read_string(fwnode, "compatible", &compatible);
+ if (ret)
return NULL;
+ p = strchr(compatible, ',');
+ strlcpy(devname, p ? p + 1 : compatible, sizeof(devname));
/* Search through the device name */
- while (id->name[0]) {
- if (strcmp(devname, id->name) == 0)
- return id;
- id++;
- }
- return NULL;
+ while (id->name[0]) {
+ if (strcmp(devname, id->name) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
}
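The rewritten matcher derives the bare device name by stripping the
"vendor," prefix from the compatible string before comparing against the
i2c_device_id table. The core of that parsing, as a stand-alone program
(the compatible value is illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *compatible = "idt,89hpes32nt8ag2";
	const char *p = strchr(compatible, ',');
	char devname[32];

	/* keep everything after the comma, or the whole string if none */
	snprintf(devname, sizeof(devname), "%s", p ? p + 1 : compatible);
	printf("%s\n", devname);	/* 89hpes32nt8ag2 */
	return 0;
}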
/*
- * idt_get_ofdata() - get IDT i2c-device parameters from device tree
+ * idt_get_fw_data() - get IDT i2c-device parameters from device tree
* @pdev: Pointer to the driver data
*/
-static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
+static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
{
- const struct device_node *node = pdev->client->dev.of_node;
struct device *dev = &pdev->client->dev;
+ struct fwnode_handle *fwnode;
+ const struct i2c_device_id *ee_id = NULL;
+ u32 eeprom_addr;
+ int ret;
- /* Read dts node parameters */
- if (node) {
- const struct i2c_device_id *ee_id = NULL;
- struct device_node *child;
- const __be32 *addr_be;
- int len;
-
- /* Walk through all child nodes looking for compatible one */
- for_each_available_child_of_node(node, child) {
- ee_id = idt_ee_match_id(child);
- if (IS_ERR_OR_NULL(ee_id)) {
- dev_warn(dev, "Skip unsupported child node %s",
- child->full_name);
- continue;
- } else
- break;
- }
-
- /* If there is no child EEPROM device, then set zero size */
- if (!ee_id) {
- idt_set_defval(pdev);
- return;
- }
+ device_for_each_child_node(dev, fwnode) {
+ ee_id = idt_ee_match_id(fwnode);
+ if (IS_ERR_OR_NULL(ee_id)) {
+ dev_warn(dev, "Skip unsupported EEPROM device");
+ continue;
+ } else
+ break;
+ }
- /* Retrieve EEPROM size */
- pdev->eesize = (u32)ee_id->driver_data;
-
- /* Get custom EEPROM address from 'reg' attribute */
- addr_be = of_get_property(child, "reg", &len);
- if (!addr_be || (len < sizeof(*addr_be))) {
- dev_warn(dev, "No reg on %s, use default address %d",
- child->full_name, EEPROM_DEF_ADDR);
- pdev->inieecmd = 0;
- pdev->eeaddr = EEPROM_DEF_ADDR << 1;
- } else {
- pdev->inieecmd = EEPROM_USA;
- pdev->eeaddr = be32_to_cpup(addr_be) << 1;
- }
+ /* If there is no fwnode EEPROM device, then set zero size */
+ if (!ee_id) {
+ dev_warn(dev, "No fwnode, EEPROM access disabled");
+ idt_set_defval(pdev);
+ return;
+ }
- /* Check EEPROM 'read-only' flag */
- if (of_get_property(child, "read-only", NULL))
- pdev->eero = true;
- else /* if (!of_get_property(node, "read-only", NULL)) */
- pdev->eero = false;
+ /* Retrieve EEPROM size */
+ pdev->eesize = (u32)ee_id->driver_data;
- dev_dbg(dev, "EEPROM of %u bytes found by %hhu",
- pdev->eesize, pdev->eeaddr);
+ /* Get custom EEPROM address from 'reg' attribute */
+ ret = fwnode_property_read_u32(fwnode, "reg", &eeprom_addr);
+ if (ret || (eeprom_addr == 0)) {
+ dev_warn(dev, "No EEPROM reg found, use default address 0x%x",
+ EEPROM_DEF_ADDR);
+ pdev->inieecmd = 0;
+ pdev->eeaddr = EEPROM_DEF_ADDR << 1;
} else {
- dev_warn(dev, "No dts node, EEPROM access disabled");
- idt_set_defval(pdev);
+ pdev->inieecmd = EEPROM_USA;
+ pdev->eeaddr = eeprom_addr << 1;
}
-}
-#else
-static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
-{
- struct device *dev = &pdev->client->dev;
- dev_warn(dev, "OF table is unsupported, EEPROM access disabled");
+ /* Check EEPROM 'read-only' flag */
+ if (fwnode_property_read_bool(fwnode, "read-only"))
+ pdev->eero = true;
+ else /* if (!fwnode_property_read_bool(fwnode, "read-only")) */
+ pdev->eero = false;
- /* Nothing we can do, just set the default values */
- idt_set_defval(pdev);
+ dev_info(dev, "EEPROM of %d bytes found by 0x%x",
+ pdev->eesize, pdev->eeaddr);
}
-#endif /* CONFIG_OF */
/*
* idt_create_pdev() - create and init data structure of the driver
@@ -1203,8 +1187,8 @@ static struct idt_89hpesx_dev *idt_create_pdev(struct i2c_client *client)
pdev->client = client;
i2c_set_clientdata(client, pdev);
- /* Read OF nodes information */
- idt_get_ofdata(pdev);
+ /* Read firmware nodes information */
+ idt_get_fw_data(pdev);
/* Initialize basic CSR CMD field - use full DWORD-sized r/w ops */
pdev->inicsrcmd = CSR_DWE;
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index e4dd93b2518c..0e32709d1022 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -124,7 +124,7 @@ static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute user_eeprom_attr = {
+static const struct bin_attribute user_eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 90520d76633f..eeb7eef62174 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -132,7 +132,7 @@ static int hmc6352_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id hmc6352_id[] = {
+static const struct i2c_device_id hmc6352_id[] = {
{ "hmc6352", 0 },
{ }
};
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index fea8ff40440f..097e3092c158 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -857,7 +857,7 @@ out:
return error;
}
-static struct pci_device_id ilo_devices[] = {
+static const struct pci_device_id ilo_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
{ }
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 8758d033db23..ec0832278170 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -454,7 +454,7 @@ ioc4_remove(struct pci_dev *pdev)
kfree(idd);
}
-static struct pci_device_id ioc4_id_table[] = {
+static const struct pci_device_id ioc4_id_table[] = {
{PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
{0}
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 4a9c50a43afb..e3bd3c1d2574 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -145,7 +145,7 @@ static struct attribute *mid_att_als[] = {
NULL
};
-static struct attribute_group m_als_gr = {
+static const struct attribute_group m_als_gr = {
.name = "isl29020",
.attrs = mid_att_als
};
@@ -188,7 +188,7 @@ static int isl29020_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id isl29020_id[] = {
+static const struct i2c_device_id isl29020_id[] = {
{ "isl29020", 0 },
{ }
};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index e389b0b5278d..8d53609861d8 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -856,7 +856,7 @@ static struct attribute *lis3lv02d_attributes[] = {
NULL
};
-static struct attribute_group lis3lv02d_attribute_group = {
+static const struct attribute_group lis3lv02d_attribute_group = {
.attrs = lis3lv02d_attributes
};
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index 3b4976396ec4..bfb6c45b6130 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -14,20 +14,17 @@ void lkdtm_EXCEPTION(void);
void lkdtm_LOOP(void);
void lkdtm_OVERFLOW(void);
void lkdtm_CORRUPT_STACK(void);
+void lkdtm_CORRUPT_STACK_STRONG(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
-void lkdtm_REFCOUNT_SATURATE_INC(void);
-void lkdtm_REFCOUNT_SATURATE_ADD(void);
-void lkdtm_REFCOUNT_ZERO_DEC(void);
-void lkdtm_REFCOUNT_ZERO_INC(void);
-void lkdtm_REFCOUNT_ZERO_SUB(void);
-void lkdtm_REFCOUNT_ZERO_ADD(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
void lkdtm_CORRUPT_USER_DS(void);
+void lkdtm_STACK_GUARD_PAGE_LEADING(void);
+void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
@@ -49,6 +46,27 @@ void lkdtm_EXEC_RODATA(void);
void lkdtm_EXEC_USERSPACE(void);
void lkdtm_ACCESS_USERSPACE(void);
+/* lkdtm_refcount.c */
+void lkdtm_REFCOUNT_INC_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_DEC_ZERO(void);
+void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_INC_ZERO(void);
+void lkdtm_REFCOUNT_ADD_ZERO(void);
+void lkdtm_REFCOUNT_INC_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_SATURATED(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_TIMING(void);
+void lkdtm_ATOMIC_TIMING(void);
+
/* lkdtm_rodata.c */
void lkdtm_rodata_do_nothing(void);
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index d9028ef50fbe..9e0b4f959987 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -6,9 +6,9 @@
*/
#include "lkdtm.h"
#include <linux/list.h>
-#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
struct lkdtm_list {
@@ -85,16 +85,31 @@ void lkdtm_OVERFLOW(void)
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
- memset(stack, 'a', 64);
+ memset(stack, '\xff', 64);
}
+/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
- char data[8];
+ char data[8] __aligned(sizeof(void *));
+
+ __lkdtm_CORRUPT_STACK(&data);
+
+ pr_info("Corrupted stack containing char array ...\n");
+}
+
+/* Same as above but will only get a canary with -fstack-protector-strong */
+noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+{
+ union {
+ unsigned short shorts[4];
+ unsigned long *ptr;
+ } data __aligned(sizeof(void *));
+
__lkdtm_CORRUPT_STACK(&data);
- pr_info("Corrupted stack with '%16s'...\n", data);
+ pr_info("Corrupted stack containing union ...\n");
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
@@ -137,88 +152,6 @@ void lkdtm_HUNG_TASK(void)
schedule();
}
-void lkdtm_REFCOUNT_SATURATE_INC(void)
-{
- refcount_t over = REFCOUNT_INIT(UINT_MAX - 1);
-
- pr_info("attempting good refcount decrement\n");
- refcount_dec(&over);
- refcount_inc(&over);
-
- pr_info("attempting bad refcount inc overflow\n");
- refcount_inc(&over);
- refcount_inc(&over);
- if (refcount_read(&over) == UINT_MAX)
- pr_err("Correctly stayed saturated, but no BUG?!\n");
- else
- pr_err("Fail: refcount wrapped\n");
-}
-
-void lkdtm_REFCOUNT_SATURATE_ADD(void)
-{
- refcount_t over = REFCOUNT_INIT(UINT_MAX - 1);
-
- pr_info("attempting good refcount decrement\n");
- refcount_dec(&over);
- refcount_inc(&over);
-
- pr_info("attempting bad refcount add overflow\n");
- refcount_add(2, &over);
- if (refcount_read(&over) == UINT_MAX)
- pr_err("Correctly stayed saturated, but no BUG?!\n");
- else
- pr_err("Fail: refcount wrapped\n");
-}
-
-void lkdtm_REFCOUNT_ZERO_DEC(void)
-{
- refcount_t zero = REFCOUNT_INIT(1);
-
- pr_info("attempting bad refcount decrement to zero\n");
- refcount_dec(&zero);
- if (refcount_read(&zero) == 0)
- pr_err("Stayed at zero, but no BUG?!\n");
- else
- pr_err("Fail: refcount went crazy\n");
-}
-
-void lkdtm_REFCOUNT_ZERO_SUB(void)
-{
- refcount_t zero = REFCOUNT_INIT(1);
-
- pr_info("attempting bad refcount subtract past zero\n");
- if (!refcount_sub_and_test(2, &zero))
- pr_info("wrap attempt was noticed\n");
- if (refcount_read(&zero) == 1)
- pr_err("Correctly stayed above 0, but no BUG?!\n");
- else
- pr_err("Fail: refcount wrapped\n");
-}
-
-void lkdtm_REFCOUNT_ZERO_INC(void)
-{
- refcount_t zero = REFCOUNT_INIT(0);
-
- pr_info("attempting bad refcount increment from zero\n");
- refcount_inc(&zero);
- if (refcount_read(&zero) == 0)
- pr_err("Stayed at zero, but no BUG?!\n");
- else
- pr_err("Fail: refcount went past zero\n");
-}
-
-void lkdtm_REFCOUNT_ZERO_ADD(void)
-{
- refcount_t zero = REFCOUNT_INIT(0);
-
- pr_info("attempting bad refcount addition from zero\n");
- refcount_add(2, &zero);
- if (refcount_read(&zero) == 0)
- pr_err("Stayed at zero, but no BUG?!\n");
- else
- pr_err("Fail: refcount went past zero\n");
-}
-
void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
@@ -282,6 +215,7 @@ void lkdtm_CORRUPT_LIST_DEL(void)
pr_err("list_del() corruption not detected!\n");
}
+/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
pr_info("setting bad task size limit\n");
@@ -290,3 +224,31 @@ void lkdtm_CORRUPT_USER_DS(void)
/* Make sure we do not keep running with a KERNEL_DS! */
force_sig(SIGKILL, current);
}
+
+/* Test that VMAP_STACK is actually allocating with a leading guard page */
+void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack - 1;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page below current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page before stack!\n");
+}
+
+/* Test that VMAP_STACK is actually allocating with a trailing guard page */
+void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack + THREAD_SIZE;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page above current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page after stack!\n");
+}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 42d2b8e31e6b..981b3ef71e47 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -201,6 +201,9 @@ struct crashtype crashtypes[] = {
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(CORRUPT_USER_DS),
CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+ CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
CRASHTYPE(WRITE_AFTER_FREE),
@@ -221,12 +224,25 @@ struct crashtype crashtypes[] = {
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
- CRASHTYPE(REFCOUNT_SATURATE_INC),
- CRASHTYPE(REFCOUNT_SATURATE_ADD),
- CRASHTYPE(REFCOUNT_ZERO_DEC),
- CRASHTYPE(REFCOUNT_ZERO_INC),
- CRASHTYPE(REFCOUNT_ZERO_SUB),
- CRASHTYPE(REFCOUNT_ZERO_ADD),
+ CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_DEC_ZERO),
+ CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_INC_ZERO),
+ CRASHTYPE(REFCOUNT_ADD_ZERO),
+ CRASHTYPE(REFCOUNT_INC_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_SATURATED),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_TIMING),
+ CRASHTYPE(ATOMIC_TIMING),
CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
diff --git a/drivers/misc/lkdtm_refcount.c b/drivers/misc/lkdtm_refcount.c
new file mode 100644
index 000000000000..2b99d448e7fd
--- /dev/null
+++ b/drivers/misc/lkdtm_refcount.c
@@ -0,0 +1,400 @@
+/*
+ * This is for all the tests related to refcount bugs (e.g. overflow,
+ * underflow, reaching zero untested, etc).
+ */
+#include "lkdtm.h"
+#include <linux/refcount.h>
+
+#ifdef CONFIG_REFCOUNT_FULL
+#define REFCOUNT_MAX (UINT_MAX - 1)
+#define REFCOUNT_SATURATED UINT_MAX
+#else
+#define REFCOUNT_MAX INT_MAX
+#define REFCOUNT_SATURATED (INT_MIN / 2)
+#endif
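The two protection flavors pin to different values: the full implementation
saturates at UINT_MAX (so its usable maximum is UINT_MAX - 1), while the
fast implementation treats the counter as signed and saturates to
INT_MIN / 2, keeping the pinned value far from both zero and the sign
boundary so racing increments or decrements cannot walk it back into the
live range. For a 32-bit int, the constants the tests key off:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	printf("full: max=%u saturated=%u\n", UINT_MAX - 1, UINT_MAX);
	printf("fast: max=%d saturated=%d\n", INT_MAX, INT_MIN / 2);
	return 0;
}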
+
+static void overflow_check(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Overflow detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Overflow detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() above the maximum value of the refcount implementation
+ * should at least saturate, and at most also WARN.
+ */
+void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_inc() without overflow\n");
+ refcount_dec(&over);
+ refcount_inc(&over);
+
+ pr_info("attempting bad refcount_inc() overflow\n");
+ refcount_inc(&over);
+ refcount_inc(&over);
+
+ overflow_check(&over);
+}
+
+/* refcount_add() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_add() without overflow\n");
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_add(4, &over);
+
+ pr_info("attempting bad refcount_add() overflow\n");
+ refcount_add(4, &over);
+
+ overflow_check(&over);
+}
+
+/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_inc_not_zero() overflow\n");
+ if (!refcount_inc_not_zero(&over))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+/* refcount_add_not_zero() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_add_not_zero() overflow\n");
+ if (!refcount_add_not_zero(6, &over))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+static void check_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ case 0:
+ pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * When a refcount_dec(), as opposed to a refcount_dec_and_test(), hits
+ * zero, it should either saturate (when inc-from-zero isn't protected)
+ * or stay at zero (when inc-from-zero is protected), and should WARN in
+ * either case.
+ */
+void lkdtm_REFCOUNT_DEC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(2);
+
+ pr_info("attempting good refcount_dec()\n");
+ refcount_dec(&zero);
+
+ pr_info("attempting bad refcount_dec() to zero\n");
+ refcount_dec(&zero);
+
+ check_zero(&zero);
+}
+
+static void check_negative(refcount_t *ref, int start)
+{
+ /*
+ * CONFIG_REFCOUNT_FULL refuses to move a refcount at all on an
+ * over-sub, so we have to track our starting position instead of
+ * looking only at zero-pinning.
+ */
+ if (refcount_read(ref) == start) {
+ pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
+ start);
+ return;
+ }
+
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Negative detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Negative detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/* A refcount_dec() going negative should saturate and may WARN. */
+void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec() below zero\n");
+ refcount_dec(&neg);
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_dec_and_test() should act like refcount_dec() above when
+ * going negative.
+ */
+void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec_and_test() below zero\n");
+ if (refcount_dec_and_test(&neg))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_sub_and_test() should act like refcount_dec_and_test()
+ * above when going negative.
+ */
+void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(3);
+
+ pr_info("attempting bad refcount_sub_and_test() below zero\n");
+ if (refcount_sub_and_test(5, &neg))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_negative(&neg, 3);
+}
+
+static void check_from_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case 0:
+ pr_info("Zero detected: stayed at zero\n");
+ break;
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_info("Fail: zero not detected, incremented to %d\n",
+ refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from zero should pin to zero or saturate and may WARN.
+ * Only CONFIG_REFCOUNT_FULL provides this protection currently.
+ */
+void lkdtm_REFCOUNT_INC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_inc_not_zero() from zero\n");
+ if (!refcount_inc_not_zero(&zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero!\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_inc() from zero\n");
+ refcount_inc(&zero);
+
+ check_from_zero(&zero);
+}
+
+/*
+ * A refcount_add() should act like refcount_inc() above when starting
+ * at zero.
+ */
+void lkdtm_REFCOUNT_ADD_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_add_not_zero() from zero\n");
+ if (!refcount_add_not_zero(3, &zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_add() from zero\n");
+ refcount_add(3, &zero);
+
+ check_from_zero(&zero);
+}
+
+static void check_saturated(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Saturation detected: still saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Saturation detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from a saturated value should at most warn about
+ * being saturated already.
+ */
+void lkdtm_REFCOUNT_INC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc() from saturated\n");
+ refcount_inc(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec() from saturated\n");
+ refcount_dec(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec() from saturated\n");
+ refcount_add(8, &sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
+ if (!refcount_inc_not_zero(&sat))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_add_not_zero() from saturated\n");
+ if (!refcount_add_not_zero(7, &sat))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec_and_test() from saturated\n");
+ if (refcount_dec_and_test(&sat))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_sub_and_test() from saturated\n");
+ if (refcount_sub_and_test(8, &sat))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Used to time the existing atomic_t when used for reference counting */
+void lkdtm_ATOMIC_TIMING(void)
+{
+ unsigned int i;
+ atomic_t count = ATOMIC_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ atomic_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (atomic_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("atomic timing: done\n");
+}
+
+/*
+ * This can be compared to ATOMIC_TIMING when implementing fast refcount
+ * protections. Looking at the number of CPU cycles tells the real story
+ * about performance. For example:
+ * cd /sys/kernel/debug/provoke-crash
+ * perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
+ */
+void lkdtm_REFCOUNT_TIMING(void)
+{
+ unsigned int i;
+ refcount_t count = REFCOUNT_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ refcount_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (refcount_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("refcount timing: done\n");
+}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 40c79089e548..1ac10cb64d6e 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -845,7 +845,7 @@ static void mei_cl_bus_dev_release(struct device *dev)
kfree(cldev);
}
-static struct device_type mei_cl_device_type = {
+static const struct device_type mei_cl_device_type = {
.release = mei_cl_bus_dev_release,
};
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 71216affcab1..10dcf4ff99a5 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1354,10 +1354,10 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.quirk_probe = mei_me_fw_type_sps
-#define MEI_CFG_LEGACY_HFS \
+#define MEI_CFG_ICH_HFS \
.fw_status.count = 0
-#define MEI_CFG_ICH_HFS \
+#define MEI_CFG_ICH10_HFS \
.fw_status.count = 1, \
.fw_status.status[0] = PCI_CFG_HFS_1
@@ -1376,38 +1376,61 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.fw_status.status[5] = PCI_CFG_HFS_6
/* ICH Legacy devices */
-const struct mei_cfg mei_me_legacy_cfg = {
- MEI_CFG_LEGACY_HFS,
+static const struct mei_cfg mei_me_ich_cfg = {
+ MEI_CFG_ICH_HFS,
};
/* ICH devices */
-const struct mei_cfg mei_me_ich_cfg = {
- MEI_CFG_ICH_HFS,
+static const struct mei_cfg mei_me_ich10_cfg = {
+ MEI_CFG_ICH10_HFS,
};
/* PCH devices */
-const struct mei_cfg mei_me_pch_cfg = {
+static const struct mei_cfg mei_me_pch_cfg = {
MEI_CFG_PCH_HFS,
};
-
/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
-const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
+static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
MEI_CFG_PCH_HFS,
MEI_CFG_FW_NM,
};
/* PCH8 Lynx Point and newer devices */
-const struct mei_cfg mei_me_pch8_cfg = {
+static const struct mei_cfg mei_me_pch8_cfg = {
MEI_CFG_PCH8_HFS,
};
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
-const struct mei_cfg mei_me_pch8_sps_cfg = {
+static const struct mei_cfg mei_me_pch8_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_SPS,
};
+/*
+ * mei_cfg_list - A list of platform-specific configurations.
+ * Note: has to be synchronized with enum mei_cfg_idx.
+ */
+static const struct mei_cfg *const mei_cfg_list[] = {
+ [MEI_ME_UNDEF_CFG] = NULL,
+ [MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
+ [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
+ [MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+ [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
+ [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
+ [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+};
+
+const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
+
+ if (idx >= MEI_ME_NUM_CFG)
+ return NULL;
+
+ return mei_cfg_list[idx];
+}
+
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index cf64847a35b9..67892533576e 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -41,8 +41,7 @@ struct mei_cfg {
#define MEI_PCI_DEVICE(dev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
- .driver_data = (kernel_ulong_t)&(cfg)
-
+ .driver_data = (kernel_ulong_t)(cfg),
#define MEI_ME_RPM_TIMEOUT 500 /* ms */
@@ -63,12 +62,36 @@ struct mei_me_hw {
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
-extern const struct mei_cfg mei_me_legacy_cfg;
-extern const struct mei_cfg mei_me_ich_cfg;
-extern const struct mei_cfg mei_me_pch_cfg;
-extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg;
-extern const struct mei_cfg mei_me_pch8_cfg;
-extern const struct mei_cfg mei_me_pch8_sps_cfg;
+/**
+ * enum mei_cfg_idx - indices to platform specific configurations.
+ *
+ * Note: has to be synchronized with mei_cfg_list[]
+ *
+ * @MEI_ME_UNDEF_CFG: Lower sentinel.
+ * @MEI_ME_ICH_CFG: I/O Controller Hub legacy devices.
+ * @MEI_ME_ICH10_CFG: I/O Controller Hub Gen10 platforms.
+ * @MEI_ME_PCH_CFG: Platform Controller Hub platforms (up to Gen8).
+ * @MEI_ME_PCH_CPT_PBG_CFG: Platform Controller Hub workstations
+ * with quirk for Node Manager exclusion.
+ * @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
+ * client platforms.
+ * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
+ * server platforms with quirk for
+ * SPS firmware exclusion.
+ * @MEI_ME_NUM_CFG: Upper sentinel.
+ */
+enum mei_cfg_idx {
+ MEI_ME_UNDEF_CFG,
+ MEI_ME_ICH_CFG,
+ MEI_ME_ICH10_CFG,
+ MEI_ME_PCH_CFG,
+ MEI_ME_PCH_CPT_PBG_CFG,
+ MEI_ME_PCH8_CFG,
+ MEI_ME_PCH8_SPS_CFG,
+ MEI_ME_NUM_CFG,
+};
+
+const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
const struct mei_cfg *cfg);
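The net effect of these hw-me.h changes is that PCI driver_data now carries a small enum index instead of a struct pointer. A minimal stand-alone sketch of that lookup pattern (names here are illustrative, not the driver's):

    #include <stddef.h>

    enum cfg_idx { CFG_UNDEF, CFG_ICH, CFG_PCH, CFG_NUM }; /* sentinels bound the table */

    struct cfg { int fw_status_count; };

    static const struct cfg cfg_ich = { .fw_status_count = 0 };
    static const struct cfg cfg_pch = { .fw_status_count = 2 };

    /* Index-keyed table; a zeroed driver_data hits the NULL lower sentinel. */
    static const struct cfg *const cfg_list[CFG_NUM] = {
    	[CFG_UNDEF] = NULL,
    	[CFG_ICH]   = &cfg_ich,
    	[CFG_PCH]   = &cfg_pch,
    };

    static const struct cfg *get_cfg(unsigned long idx)
    {
    	return idx < CFG_NUM ? cfg_list[idx] : NULL; /* bounds-checked lookup */
    }

Validating the index in probe(), as mei_me_probe() now does, means a stale or malformed ID-table entry fails with -ENODEV instead of dereferencing a bad pointer.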
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index bac33311f55a..4ff40d319676 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -43,57 +43,58 @@
/* mei_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
- {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},
-
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},
-
- {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
-
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},
-
- {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
-
- {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
/* required last entry */
{0, }
@@ -138,12 +139,15 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,
*/
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
+ const struct mei_cfg *cfg;
struct mei_device *dev;
struct mei_me_hw *hw;
unsigned int irqflags;
int err;
+ cfg = mei_me_get_cfg(ent->driver_data);
+ if (!cfg)
+ return -ENODEV;
if (!mei_me_quirk_probe(pdev, cfg))
return -ENODEV;
@@ -491,6 +495,7 @@ static struct pci_driver mei_me_driver = {
.remove = mei_me_remove,
.shutdown = mei_me_shutdown,
.driver.pm = MEI_ME_PM_OPS,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
module_pci_driver(mei_me_driver);
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index e42bdc90fa27..540845651b8c 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -659,7 +659,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(pch_mac, S_IRUGO | S_IWUSR, show_pch_mac, store_pch_mac);
-static struct bin_attribute pch_bin_attr = {
+static const struct bin_attribute pch_bin_attr = {
.attr = {
.name = "pch_firmware",
.mode = S_IRUGO | S_IWUSR,
@@ -891,7 +891,7 @@ static int pch_phub_resume(struct pci_dev *pdev)
#define pch_phub_resume NULL
#endif /* CONFIG_PM */
-static struct pci_device_id pch_phub_pcidev_id[] = {
+static const struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index d1185b78cf9a..fc0415771c00 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -196,15 +196,15 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
ret = of_address_to_resource(child, 0, &child_res);
if (ret < 0) {
dev_err(sram->dev,
- "could not get address for node %s\n",
- child->full_name);
+ "could not get address for node %pOF\n",
+ child);
goto err_chunks;
}
if (child_res.start < res->start || child_res.end > res->end) {
dev_err(sram->dev,
- "reserved block %s outside the sram area\n",
- child->full_name);
+ "reserved block %pOF outside the sram area\n",
+ child);
ret = -EINVAL;
goto err_chunks;
}
@@ -230,8 +230,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
ret = of_property_read_string(child, "label", &label);
if (ret && ret != -EINVAL) {
dev_err(sram->dev,
- "%s has invalid label name\n",
- child->full_name);
+ "%pOF has invalid label name\n",
+ child);
goto err_chunks;
}
if (!label)
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index e74413f882ab..b77aacafc3fc 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -660,7 +660,7 @@ static struct attribute *uim_attrs[] = {
NULL,
};
-static struct attribute_group uim_attr_grp = {
+static const struct attribute_group uim_attr_grp = {
.attrs = uim_attrs,
};
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index a37a42f67088..e5f108713dd8 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -415,7 +415,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
tifm_free_adapter(fm);
}
-static struct pci_device_id tifm_7xx1_pci_tbl [] = {
+static const struct pci_device_id tifm_7xx1_pci_tbl[] = {
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 06c4974ee8dd..8af5c2672f71 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -2235,14 +2235,8 @@ int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
handle.context, handle.resource,
result);
- if (entry->vmci_page_files)
- qp_host_unregister_user_memory(entry->produce_q,
- entry->
- consume_q);
- else
- qp_host_unregister_user_memory(entry->produce_q,
- entry->
- consume_q);
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
}
diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile
index 6bac5b0fea13..0e1e59760e3f 100644
--- a/drivers/mux/Makefile
+++ b/drivers/mux/Makefile
@@ -2,6 +2,11 @@
# Makefile for multiplexer devices.
#
+mux-core-objs := core.o
+mux-adg792a-objs := adg792a.o
+mux-gpio-objs := gpio.o
+mux-mmio-objs := mmio.o
+
obj-$(CONFIG_MULTIPLEXER) += mux-core.o
obj-$(CONFIG_MUX_ADG792A) += mux-adg792a.o
obj-$(CONFIG_MUX_GPIO) += mux-gpio.o
diff --git a/drivers/mux/mux-adg792a.c b/drivers/mux/adg792a.c
index 12aa221ab90d..12aa221ab90d 100644
--- a/drivers/mux/mux-adg792a.c
+++ b/drivers/mux/adg792a.c
diff --git a/drivers/mux/mux-core.c b/drivers/mux/core.c
index 2fe96c470112..2260063b0ea8 100644
--- a/drivers/mux/mux-core.c
+++ b/drivers/mux/core.c
@@ -58,7 +58,7 @@ static void mux_chip_release(struct device *dev)
kfree(mux_chip);
}
-static struct device_type mux_type = {
+static const struct device_type mux_type = {
.name = "mux-chip",
.release = mux_chip_release,
};
@@ -452,8 +452,8 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
"mux-controls", "#mux-control-cells",
index, &args);
if (ret) {
- dev_err(dev, "%s: failed to get mux-control %s(%i)\n",
- np->full_name, mux_name ?: "", index);
+ dev_err(dev, "%pOF: failed to get mux-control %s(%i)\n",
+ np, mux_name ?: "", index);
return ERR_PTR(ret);
}
@@ -464,8 +464,8 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
if (args.args_count > 1 ||
(!args.args_count && (mux_chip->controllers > 1))) {
- dev_err(dev, "%s: wrong #mux-control-cells for %s\n",
- np->full_name, args.np->full_name);
+ dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
+ np, args.np);
return ERR_PTR(-EINVAL);
}
@@ -474,8 +474,8 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
controller = args.args[0];
if (controller >= mux_chip->controllers) {
- dev_err(dev, "%s: bad mux controller %u specified in %s\n",
- np->full_name, controller, args.np->full_name);
+ dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
+ np, controller, args.np);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/mux/mux-gpio.c b/drivers/mux/gpio.c
index 468bf1709606..468bf1709606 100644
--- a/drivers/mux/mux-gpio.c
+++ b/drivers/mux/gpio.c
diff --git a/drivers/mux/mux-mmio.c b/drivers/mux/mmio.c
index 37c1de359a70..37c1de359a70 100644
--- a/drivers/mux/mux-mmio.c
+++ b/drivers/mux/mmio.c
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 4c49285168fb..de54c7f5048a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -290,7 +290,7 @@ static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
mutex_lock(&nvmem_cells_mutex);
list_for_each_entry(p, &nvmem_cells, node)
- if (p && !strcmp(p->name, cell_id)) {
+ if (!strcmp(p->name, cell_id)) {
mutex_unlock(&nvmem_cells_mutex);
return p;
}
@@ -794,8 +794,8 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
addr = of_get_property(cell_np, "reg", &len);
if (!addr || (len < 2 * sizeof(u32))) {
- dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
- cell_np->full_name);
+ dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
+ cell_np);
rval = -EINVAL;
goto err_mem;
}
@@ -1111,6 +1111,43 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
EXPORT_SYMBOL_GPL(nvmem_cell_write);
/**
+ * nvmem_cell_read_u32() - Read a cell value as a u32
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+
+ cell = nvmem_cell_get(dev, cell_id);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ return PTR_ERR(buf);
+ }
+ if (len != sizeof(*val)) {
+ kfree(buf);
+ nvmem_cell_put(cell);
+ return -EINVAL;
+ }
+ memcpy(val, buf, sizeof(*val));
+
+ kfree(buf);
+ nvmem_cell_put(cell);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
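+/*
+ * Hedged usage sketch for consumers (the cell name is hypothetical):
+ *
+ *	u32 offset;
+ *	int err = nvmem_cell_read_u32(dev, "mac-offset", &offset);
+ *	if (err)
+ *		return err;
+ *
+ * The helper hides the get/read/length-check/put sequence shown above.
+ */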
+
+/**
* nvmem_device_cell_read() - Read a given nvmem device and cell
*
* @nvmem: nvmem device to read from.
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index c81ae4c6da74..6c7e2c424a4e 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -197,7 +197,7 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
return ret;
}
- rst = devm_reset_control_get(dev, NULL);
+ rst = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(rst)) {
dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst));
ret = PTR_ERR(rst);
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 46eb15fb57ff..5484a46dafda 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -44,7 +44,7 @@ static struct daisydev {
} *topology = NULL;
static DEFINE_SPINLOCK(topology_lock);
-static int numdevs = 0;
+static int numdevs;
/* Forward-declaration of lower-level functions. */
static int mux_present(struct parport *port);
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
index a81cd2a2747f..9fbf6ccd54de 100644
--- a/drivers/parport/parport_atari.c
+++ b/drivers/parport/parport_atari.c
@@ -18,7 +18,7 @@
#include <asm/irq.h>
#include <asm/atariints.h>
-static struct parport *this_port = NULL;
+static struct parport *this_port;
static unsigned char
parport_atari_read_data(struct parport *p)
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 8f8c9f3aa691..2fc91edb058d 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -358,8 +358,7 @@ static int parport_ax88796_probe(struct platform_device *pdev)
exit_unmap:
iounmap(dd->base);
exit_res:
- release_resource(dd->io);
- kfree(dd->io);
+ release_mem_region(dd->io->start, size);
exit_mem:
kfree(dd);
return ret;
@@ -373,8 +372,7 @@ static int parport_ax88796_remove(struct platform_device *pdev)
free_irq(p->irq, p);
parport_remove_port(p);
iounmap(dd->base);
- release_resource(dd->io);
- kfree(dd->io);
+ release_mem_region(dd->io->start, resource_size(dd->io));
kfree(dd);
return 0;
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index dcbeeb220dda..0186db7680d4 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -138,7 +138,7 @@ static unsigned int features = ~0U;
static bool verbose_probing = DEFAULT_VERBOSE_PROBING;
/* We do not support more than one port. */
-static struct parport *this_port = NULL;
+static struct parport *this_port;
/* Timing constants for FIFO modes. */
#define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 2f650f68af14..7f4be0e484c7 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -174,7 +174,7 @@ DPRINTK(KERN_DEBUG "read_status %02x\n", status);
return status;
}
-static int use_cnt = 0;
+static int use_cnt;
static irqreturn_t mfc3_interrupt(int irq, void *dev_id)
{
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 5548193a28a6..489492b608cf 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1083,9 +1083,9 @@ static void show_parconfig_winbond(int io, int key)
printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
(cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
if ((cr74 & 0x07) > 3)
- printk("dma=none\n");
+ pr_cont("dma=none\n");
else
- printk("dma=%d\n", cr74 & 0x07);
+ pr_cont("dma=%d\n", cr74 & 0x07);
printk(KERN_INFO
"Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
irqtypes[crf0>>7], (crf0>>3)&0x0f);
@@ -1685,14 +1685,14 @@ static int parport_ECP_supported(struct parport *pb)
pb->base, config, configb);
printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
if ((configb >> 3) & 0x07)
- printk("%d", intrline[(configb >> 3) & 0x07]);
+ pr_cont("%d", intrline[(configb >> 3) & 0x07]);
else
- printk("<none or set by other means>");
- printk(" dma=");
+ pr_cont("<none or set by other means>");
+ pr_cont(" dma=");
if ((configb & 0x03) == 0x00)
- printk("<none or set by other means>\n");
+ pr_cont("<none or set by other means>\n");
else
- printk("%d\n", configb & 0x07);
+ pr_cont("%d\n", configb & 0x07);
}
/* Go back to mode 000 */
@@ -2399,8 +2399,8 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
"parport_pc: ITE 8872 parallel port: io=0x%X",
ite8872_lpt);
if (irq != PARPORT_IRQ_NONE)
- printk(", irq=%d", irq);
- printk("\n");
+ pr_cont(", irq=%d", irq);
+ pr_cont("\n");
return 1;
}
@@ -2581,10 +2581,10 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
printk(KERN_INFO
"parport_pc: VIA parallel port: io=0x%X", port1);
if (irq != PARPORT_IRQ_NONE)
- printk(", irq=%d", irq);
+ pr_cont(", irq=%d", irq);
if (dma != PARPORT_DMA_NONE)
- printk(", dma=%d", dma);
- printk("\n");
+ pr_cont(", dma=%d", dma);
+ pr_cont("\n");
return 1;
}
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index 9ae59e223131..d099a0c8cee5 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -253,16 +253,16 @@ static void extcon_work(struct work_struct *work)
vbus = gpiod_get_value_cansleep(driver->vbus_gpiod);
if (!id && vbus) { /* Host connected */
- extcon_set_cable_state_(driver->edev, EXTCON_USB_HOST, true);
+ extcon_set_state_sync(driver->edev, EXTCON_USB_HOST, true);
pr_debug("Host cable connected\n");
driver->data->new_state = EVT_HOST;
connect_change(driver);
} else if (id && !vbus) { /* Disconnected */
- extcon_set_cable_state_(driver->edev, EXTCON_USB_HOST, false);
- extcon_set_cable_state_(driver->edev, EXTCON_USB, false);
+ extcon_set_state_sync(driver->edev, EXTCON_USB_HOST, false);
+ extcon_set_state_sync(driver->edev, EXTCON_USB, false);
pr_debug("Cable disconnected\n");
} else if (id && vbus) { /* Device connected */
- extcon_set_cable_state_(driver->edev, EXTCON_USB, true);
+ extcon_set_state_sync(driver->edev, EXTCON_USB, true);
pr_debug("Device cable connected\n");
driver->data->new_state = EVT_DEVICE;
connect_change(driver);
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 4b20abc3ae2f..2d0c70b5589f 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -155,12 +155,12 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
}
if (uphy->vbus_edev) {
- state = extcon_get_cable_state_(uphy->vbus_edev, EXTCON_USB);
+ state = extcon_get_state(uphy->vbus_edev, EXTCON_USB);
/* setup initial state */
qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
uphy->vbus_edev);
- ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
- &uphy->vbus_notify);
+ ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
+ EXTCON_USB, &uphy->vbus_notify);
if (ret)
goto err_ulpi;
}
@@ -179,16 +179,8 @@ err_sleep:
static int qcom_usb_hs_phy_power_off(struct phy *phy)
{
- int ret;
struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
- if (uphy->vbus_edev) {
- ret = extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
- &uphy->vbus_notify);
- if (ret)
- return ret;
- }
-
regulator_disable(uphy->v3p3);
regulator_disable(uphy->v1p8);
clk_disable_unprepare(uphy->sleep_clk);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 994cc1a08f50..ee7ce5ee53f9 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -562,7 +562,7 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
rockchip_usb2phy_power_off(rport->phy);
/* fall through */
case OTG_STATE_B_IDLE:
- if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) > 0) {
+ if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0) {
dev_dbg(&rport->phy->dev, "usb otg host connect\n");
rport->state = OTG_STATE_A_HOST;
rockchip_usb2phy_power_on(rport->phy);
@@ -615,7 +615,7 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
rport->vbus_attached = vbus_attach;
if (notify_charger && rphy->edev) {
- extcon_set_cable_state_(rphy->edev,
+ extcon_set_state_sync(rphy->edev,
cable, vbus_attach);
if (cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(rphy->edev,
@@ -636,7 +636,7 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
sch_work = true;
break;
case OTG_STATE_A_HOST:
- if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) == 0) {
+ if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) == 0) {
dev_dbg(&rport->phy->dev, "usb otg host disconnect\n");
rport->state = OTG_STATE_B_IDLE;
rockchip_usb2phy_power_off(rport->phy);
@@ -1066,8 +1066,8 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
if (!IS_ERR(rphy->edev)) {
rport->event_nb.notifier_call = rockchip_otg_event;
- ret = extcon_register_notifier(rphy->edev, EXTCON_USB_HOST,
- &rport->event_nb);
+ ret = devm_extcon_register_notifier(rphy->dev, rphy->edev,
+ EXTCON_USB_HOST, &rport->event_nb);
if (ret)
dev_err(rphy->dev, "register USB HOST notifier failed\n");
}
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index f4b7a98a7913..360b8218f322 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#define PMIC_ARB_VERSION 0x0000
#define PMIC_ARB_VERSION_V2_MIN 0x20010000
#define PMIC_ARB_VERSION_V3_MIN 0x30000000
+#define PMIC_ARB_VERSION_V5_MIN 0x50000000
#define PMIC_ARB_INT_EN 0x0004
/* PMIC Arbiter channel registers offsets */
@@ -39,7 +40,6 @@
#define PMIC_ARB_WDATA1 0x14
#define PMIC_ARB_RDATA0 0x18
#define PMIC_ARB_RDATA1 0x1C
-#define PMIC_ARB_REG_CHNL(N) (0x800 + 0x4 * (N))
/* Mapping Table */
#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
@@ -51,7 +51,9 @@
#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
#define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */
-#define PMIC_ARB_CHAN_VALID BIT(15)
+#define PMIC_ARB_APID_VALID BIT(15)
+#define PMIC_ARB_CHAN_IS_IRQ_OWNER(reg) ((reg) & BIT(24))
+#define INVALID_EE 0xFF
/* Ownership Table */
#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
@@ -86,6 +88,15 @@ enum pmic_arb_cmd_op_code {
PMIC_ARB_OP_ZERO_WRITE = 16,
};
+/*
+ * PMIC arbiter version 5 uses different register offsets for read/write vs
+ * observer channels.
+ */
+enum pmic_arb_channel {
+ PMIC_ARB_CHANNEL_RW,
+ PMIC_ARB_CHANNEL_OBS,
+};
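+/*
+ * Illustrative note: pmic_arb_read_cmd() below resolves its register base
+ * with PMIC_ARB_CHANNEL_OBS and pmic_arb_write_cmd() with
+ * PMIC_ARB_CHANNEL_RW; pre-v5 offset callbacks such as pmic_arb_offset_v1()
+ * simply ignore the channel type.
+ */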
+
/* Maximum number of supported PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS 512
#define PMIC_ARB_TIMEOUT_US 100
@@ -97,22 +108,23 @@ enum pmic_arb_cmd_op_code {
/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
-#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+#define spec_to_hwirq(slave_id, periph_id, irq_id, apid) \
((((slave_id) & 0xF) << 28) | \
(((periph_id) & 0xFF) << 20) | \
(((irq_id) & 0x7) << 16) | \
(((apid) & 0x1FF) << 0))
-#define HWIRQ_SID(hwirq) (((hwirq) >> 28) & 0xF)
-#define HWIRQ_PER(hwirq) (((hwirq) >> 20) & 0xFF)
-#define HWIRQ_IRQ(hwirq) (((hwirq) >> 16) & 0x7)
-#define HWIRQ_APID(hwirq) (((hwirq) >> 0) & 0x1FF)
+#define hwirq_to_sid(hwirq) (((hwirq) >> 28) & 0xF)
+#define hwirq_to_per(hwirq) (((hwirq) >> 20) & 0xFF)
+#define hwirq_to_irq(hwirq) (((hwirq) >> 16) & 0x7)
+#define hwirq_to_apid(hwirq) (((hwirq) >> 0) & 0x1FF)
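+/*
+ * Worked example (values illustrative): spec_to_hwirq(0x2, 0x40, 3, 0x17)
+ * packs to 0x24030017; hwirq_to_sid() recovers 0x2, hwirq_to_per() 0x40,
+ * hwirq_to_irq() 3 and hwirq_to_apid() 0x17.
+ */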
struct pmic_arb_ver_ops;
struct apid_data {
u16 ppid;
- u8 owner;
+ u8 write_ee;
+ u8 irq_ee;
};
/**
@@ -128,12 +140,11 @@ struct apid_data {
* @ee: the current Execution Environment
* @min_apid: minimum APID (used for bounding IRQ search)
* @max_apid: maximum APID
- * @max_periph: maximum number of PMIC peripherals supported by HW.
* @mapping_table: in-memory copy of PPID -> APID mapping table.
* @domain: irq domain object for PMIC IRQ domain
* @spmic: SPMI controller object
* @ver_ops: version dependent operations.
- * @ppid_to_apid in-memory copy of PPID -> channel (APID) mapping table.
+ * @ppid_to_apid: in-memory copy of PPID -> APID mapping table.
*/
struct spmi_pmic_arb {
void __iomem *rd_base;
@@ -148,7 +159,6 @@ struct spmi_pmic_arb {
u8 ee;
u16 min_apid;
u16 max_apid;
- u16 max_periph;
u32 *mapping_table;
DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
struct irq_domain *domain;
@@ -164,92 +174,94 @@ struct spmi_pmic_arb {
*
* @ver_str: version string.
* @ppid_to_apid: finds the apid for a given ppid.
- * @mode: access rights to specified pmic peripheral.
* @non_data_cmd: on v1 issues an spmi non-data command.
* on v2 no HW support, returns -EOPNOTSUPP.
* @offset: on v1 offset of per-ee channel.
* on v2 offset of per-ee and per-ppid channel.
* @fmt_cmd: formats a GENI/SPMI command.
- * @owner_acc_status: on v1 offset of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
- * on v2 offset of SPMI_PIC_OWNERm_ACC_STATUSn.
- * @acc_enable: on v1 offset of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
- * on v2 offset of SPMI_PIC_ACC_ENABLEn.
- * @irq_status: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
- * on v2 offset of SPMI_PIC_IRQ_STATUSn.
- * @irq_clear: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
- * on v2 offset of SPMI_PIC_IRQ_CLEARn.
+ * @owner_acc_status: on v1 address of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
+ * on v2 address of SPMI_PIC_OWNERm_ACC_STATUSn.
+ * @acc_enable: on v1 address of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
+ * on v2 address of SPMI_PIC_ACC_ENABLEn.
+ * @irq_status: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
+ * on v2 address of SPMI_PIC_IRQ_STATUSn.
+ * @irq_clear: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
+ * on v2 address of SPMI_PIC_IRQ_CLEARn.
+ * @apid_map_offset: offset of PMIC_ARB_REG_CHNLn
*/
struct pmic_arb_ver_ops {
const char *ver_str;
- int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
- u16 *apid);
- int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
- mode_t *mode);
+ int (*ppid_to_apid)(struct spmi_pmic_arb *pmic_arb, u16 ppid);
/* spmi commands (read_cmd, write_cmd, cmd) functionality */
- int (*offset)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
- u32 *offset);
+ int (*offset)(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
+ enum pmic_arb_channel ch_type);
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
/* Interrupts controller functionality (offset of PIC registers) */
- u32 (*owner_acc_status)(u8 m, u16 n);
- u32 (*acc_enable)(u16 n);
- u32 (*irq_status)(u16 n);
- u32 (*irq_clear)(u16 n);
+ void __iomem *(*owner_acc_status)(struct spmi_pmic_arb *pmic_arb, u8 m,
+ u16 n);
+ void __iomem *(*acc_enable)(struct spmi_pmic_arb *pmic_arb, u16 n);
+ void __iomem *(*irq_status)(struct spmi_pmic_arb *pmic_arb, u16 n);
+ void __iomem *(*irq_clear)(struct spmi_pmic_arb *pmic_arb, u16 n);
+ u32 (*apid_map_offset)(u16 n);
};
-static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
+static inline void pmic_arb_base_write(struct spmi_pmic_arb *pmic_arb,
u32 offset, u32 val)
{
- writel_relaxed(val, pa->wr_base + offset);
+ writel_relaxed(val, pmic_arb->wr_base + offset);
}
-static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pa,
+static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pmic_arb,
u32 offset, u32 val)
{
- writel_relaxed(val, pa->rd_base + offset);
+ writel_relaxed(val, pmic_arb->rd_base + offset);
}
/**
- * pa_read_data: reads pmic-arb's register and copy 1..4 bytes to buf
+ * pmic_arb_read_data: reads pmic-arb's register and copies 1..4 bytes to buf
* @bc: byte count -1. range: 0..3
* @reg: register's address
* @buf: output parameter, length must be bc + 1
*/
-static void pa_read_data(struct spmi_pmic_arb *pa, u8 *buf, u32 reg, u8 bc)
+static void
+pmic_arb_read_data(struct spmi_pmic_arb *pmic_arb, u8 *buf, u32 reg, u8 bc)
{
- u32 data = __raw_readl(pa->rd_base + reg);
+ u32 data = __raw_readl(pmic_arb->rd_base + reg);
memcpy(buf, &data, (bc & 3) + 1);
}
/**
- * pa_write_data: write 1..4 bytes from buf to pmic-arb's register
+ * pmic_arb_write_data: writes 1..4 bytes from buf to pmic-arb's register
* @bc: byte-count -1. range: 0..3.
* @reg: register's address.
* @buf: buffer to write. length must be bc + 1.
*/
-static void
-pa_write_data(struct spmi_pmic_arb *pa, const u8 *buf, u32 reg, u8 bc)
+static void pmic_arb_write_data(struct spmi_pmic_arb *pmic_arb, const u8 *buf,
+ u32 reg, u8 bc)
{
u32 data = 0;
memcpy(&data, buf, (bc & 3) + 1);
- pmic_arb_base_write(pa, reg, data);
+ __raw_writel(data, pmic_arb->wr_base + reg);
}
static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
- void __iomem *base, u8 sid, u16 addr)
+ void __iomem *base, u8 sid, u16 addr,
+ enum pmic_arb_channel ch_type)
{
- struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
u32 status = 0;
u32 timeout = PMIC_ARB_TIMEOUT_US;
u32 offset;
int rc;
- rc = pa->ver_ops->offset(pa, sid, addr, &offset);
- if (rc)
+ rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, ch_type);
+ if (rc < 0)
return rc;
+ offset = rc;
offset += PMIC_ARB_STATUS;
while (timeout--) {
@@ -257,22 +269,19 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
if (status & PMIC_ARB_STATUS_DONE) {
if (status & PMIC_ARB_STATUS_DENIED) {
- dev_err(&ctrl->dev,
- "%s: transaction denied (0x%x)\n",
+ dev_err(&ctrl->dev, "%s: transaction denied (0x%x)\n",
__func__, status);
return -EPERM;
}
if (status & PMIC_ARB_STATUS_FAILURE) {
- dev_err(&ctrl->dev,
- "%s: transaction failed (0x%x)\n",
+ dev_err(&ctrl->dev, "%s: transaction failed (0x%x)\n",
__func__, status);
return -EIO;
}
if (status & PMIC_ARB_STATUS_DROPPED) {
- dev_err(&ctrl->dev,
- "%s: transaction dropped (0x%x)\n",
+ dev_err(&ctrl->dev, "%s: transaction dropped (0x%x)\n",
__func__, status);
return -EIO;
}
@@ -282,8 +291,7 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
udelay(1);
}
- dev_err(&ctrl->dev,
- "%s: timeout, status 0x%x\n",
+ dev_err(&ctrl->dev, "%s: timeout, status 0x%x\n",
__func__, status);
return -ETIMEDOUT;
}
@@ -291,22 +299,24 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
static int
pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
- struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u32 cmd;
int rc;
u32 offset;
- rc = pa->ver_ops->offset(pa, sid, 0, &offset);
- if (rc)
+ rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, PMIC_ARB_CHANNEL_RW);
+ if (rc < 0)
return rc;
+ offset = rc;
cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
- raw_spin_lock_irqsave(&pa->lock, flags);
- pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, 0);
- raw_spin_unlock_irqrestore(&pa->lock, flags);
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0,
+ PMIC_ARB_CHANNEL_RW);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
@@ -320,7 +330,7 @@ pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
- struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
@@ -328,38 +338,27 @@ static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
return -EINVAL;
- return pa->ver_ops->non_data_cmd(ctrl, opc, sid);
+ return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
}
static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, u8 *buf, size_t len)
{
- struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u8 bc = len - 1;
u32 cmd;
int rc;
u32 offset;
- mode_t mode;
-
- rc = pa->ver_ops->offset(pa, sid, addr, &offset);
- if (rc)
- return rc;
- rc = pa->ver_ops->mode(pa, sid, addr, &mode);
- if (rc)
+ rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
+ PMIC_ARB_CHANNEL_OBS);
+ if (rc < 0)
return rc;
- if (!(mode & S_IRUSR)) {
- dev_err(&pa->spmic->dev,
- "error: impermissible read from peripheral sid:%d addr:0x%x\n",
- sid, addr);
- return -EPERM;
- }
-
+ offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
- dev_err(&ctrl->dev,
- "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+ dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -374,54 +373,45 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
- raw_spin_lock_irqsave(&pa->lock, flags);
- pmic_arb_set_rd_cmd(pa, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pa->rd_base, sid, addr);
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr,
+ PMIC_ARB_CHANNEL_OBS);
if (rc)
goto done;
- pa_read_data(pa, buf, offset + PMIC_ARB_RDATA0,
+ pmic_arb_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
min_t(u8, bc, 3));
if (bc > 3)
- pa_read_data(pa, buf + 4, offset + PMIC_ARB_RDATA1, bc - 4);
+ pmic_arb_read_data(pmic_arb, buf + 4, offset + PMIC_ARB_RDATA1,
+ bc - 4);
done:
- raw_spin_unlock_irqrestore(&pa->lock, flags);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
- u16 addr, const u8 *buf, size_t len)
+ u16 addr, const u8 *buf, size_t len)
{
- struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u8 bc = len - 1;
u32 cmd;
int rc;
u32 offset;
- mode_t mode;
-
- rc = pa->ver_ops->offset(pa, sid, addr, &offset);
- if (rc)
- return rc;
- rc = pa->ver_ops->mode(pa, sid, addr, &mode);
- if (rc)
+ rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
+ PMIC_ARB_CHANNEL_RW);
+ if (rc < 0)
return rc;
- if (!(mode & S_IWUSR)) {
- dev_err(&pa->spmic->dev,
- "error: impermissible write to peripheral sid:%d addr:0x%x\n",
- sid, addr);
- return -EPERM;
- }
-
+ offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
- dev_err(&ctrl->dev,
- "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+ dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -429,7 +419,7 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
/* Check the opcode */
if (opc >= 0x40 && opc <= 0x5F)
opc = PMIC_ARB_OP_WRITE;
- else if (opc >= 0x00 && opc <= 0x0F)
+ else if (opc <= 0x0F)
opc = PMIC_ARB_OP_EXT_WRITE;
else if (opc >= 0x30 && opc <= 0x37)
opc = PMIC_ARB_OP_EXT_WRITEL;
@@ -438,18 +428,21 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
/* Write data to FIFOs */
- raw_spin_lock_irqsave(&pa->lock, flags);
- pa_write_data(pa, buf, offset + PMIC_ARB_WDATA0, min_t(u8, bc, 3));
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
+ min_t(u8, bc, 3));
if (bc > 3)
- pa_write_data(pa, buf + 4, offset + PMIC_ARB_WDATA1, bc - 4);
+ pmic_arb_write_data(pmic_arb, buf + 4, offset + PMIC_ARB_WDATA1,
+ bc - 4);
/* Start the transaction */
- pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr);
- raw_spin_unlock_irqrestore(&pa->lock, flags);
+ pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr,
+ PMIC_ARB_CHANNEL_RW);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
@@ -475,67 +468,64 @@ struct spmi_pmic_arb_qpnpint_type {
static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
size_t len)
{
- struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 sid = HWIRQ_SID(d->hwirq);
- u8 per = HWIRQ_PER(d->hwirq);
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ u8 sid = hwirq_to_sid(d->hwirq);
+ u8 per = hwirq_to_per(d->hwirq);
- if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
(per << 8) + reg, buf, len))
- dev_err_ratelimited(&pa->spmic->dev,
- "failed irqchip transaction on %x\n",
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n",
d->irq);
}
static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
- struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 sid = HWIRQ_SID(d->hwirq);
- u8 per = HWIRQ_PER(d->hwirq);
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ u8 sid = hwirq_to_sid(d->hwirq);
+ u8 per = hwirq_to_per(d->hwirq);
- if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
+ if (pmic_arb_read_cmd(pmic_arb->spmic, SPMI_CMD_EXT_READL, sid,
(per << 8) + reg, buf, len))
- dev_err_ratelimited(&pa->spmic->dev,
- "failed irqchip transaction on %x\n",
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n",
d->irq);
}
-static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
+static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
{
- u16 ppid = pa->apid_data[apid].ppid;
+ u16 ppid = pmic_arb->apid_data[apid].ppid;
u8 sid = ppid >> 8;
u8 per = ppid & 0xFF;
u8 irq_mask = BIT(id);
- writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));
+ writel_relaxed(irq_mask, pmic_arb->ver_ops->irq_clear(pmic_arb, apid));
- if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
(per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pa->spmic->dev,
- "failed to ack irq_mask = 0x%x for ppid = %x\n",
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
irq_mask, ppid);
- if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
(per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pa->spmic->dev,
- "failed to ack irq_mask = 0x%x for ppid = %x\n",
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
irq_mask, ppid);
}
-static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
+static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
{
unsigned int irq;
u32 status;
int id;
- u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
- u8 per = pa->apid_data[apid].ppid & 0xFF;
+ u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
+ u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
- status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
+ status = readl_relaxed(pmic_arb->ver_ops->irq_status(pmic_arb, apid));
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
- irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
+ irq = irq_find_mapping(pmic_arb->domain,
+ spec_to_hwirq(sid, per, id, apid));
if (irq == 0) {
- cleanup_irq(pa, apid, id);
+ cleanup_irq(pmic_arb, apid, id);
continue;
}
generic_handle_irq(irq);
@@ -544,27 +534,28 @@ static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
static void pmic_arb_chained_irq(struct irq_desc *desc)
{
- struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
+ struct spmi_pmic_arb *pmic_arb = irq_desc_get_handler_data(desc);
+ const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
struct irq_chip *chip = irq_desc_get_chip(desc);
- void __iomem *intr = pa->intr;
- int first = pa->min_apid >> 5;
- int last = pa->max_apid >> 5;
+ int first = pmic_arb->min_apid >> 5;
+ int last = pmic_arb->max_apid >> 5;
+ u8 ee = pmic_arb->ee;
u32 status, enable;
int i, id, apid;
chained_irq_enter(chip, desc);
for (i = first; i <= last; ++i) {
- status = readl_relaxed(intr +
- pa->ver_ops->owner_acc_status(pa->ee, i));
+ status = readl_relaxed(
+ ver_ops->owner_acc_status(pmic_arb, ee, i));
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
apid = id + i * 32;
- enable = readl_relaxed(intr +
- pa->ver_ops->acc_enable(apid));
+ enable = readl_relaxed(
+ ver_ops->acc_enable(pmic_arb, apid));
if (enable & SPMI_PIC_ACC_ENABLE_BIT)
- periph_interrupt(pa, apid);
+ periph_interrupt(pmic_arb, apid);
}
}
@@ -573,12 +564,12 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
static void qpnpint_irq_ack(struct irq_data *d)
{
- struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 irq = HWIRQ_IRQ(d->hwirq);
- u16 apid = HWIRQ_APID(d->hwirq);
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ u8 irq = hwirq_to_irq(d->hwirq);
+ u16 apid = hwirq_to_apid(d->hwirq);
u8 data;
- writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
+ writel_relaxed(BIT(irq), pmic_arb->ver_ops->irq_clear(pmic_arb, apid));
data = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
@@ -586,7 +577,7 @@ static void qpnpint_irq_ack(struct irq_data *d)
static void qpnpint_irq_mask(struct irq_data *d)
{
- u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 irq = hwirq_to_irq(d->hwirq);
u8 data = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
@@ -594,13 +585,14 @@ static void qpnpint_irq_mask(struct irq_data *d)
static void qpnpint_irq_unmask(struct irq_data *d)
{
- struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 irq = HWIRQ_IRQ(d->hwirq);
- u16 apid = HWIRQ_APID(d->hwirq);
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
+ u8 irq = hwirq_to_irq(d->hwirq);
+ u16 apid = hwirq_to_apid(d->hwirq);
u8 buf[2];
writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
- pa->intr + pa->ver_ops->acc_enable(apid));
+ ver_ops->acc_enable(pmic_arb, apid));
qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
if (!(buf[0] & BIT(irq))) {
@@ -618,44 +610,51 @@ static void qpnpint_irq_unmask(struct irq_data *d)
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct spmi_pmic_arb_qpnpint_type type;
- u8 irq = HWIRQ_IRQ(d->hwirq);
- u8 bit_mask_irq = BIT(irq);
+ irq_flow_handler_t flow_handler;
+ u8 irq = hwirq_to_irq(d->hwirq);
qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- type.type |= bit_mask_irq;
+ type.type |= BIT(irq);
if (flow_type & IRQF_TRIGGER_RISING)
- type.polarity_high |= bit_mask_irq;
+ type.polarity_high |= BIT(irq);
if (flow_type & IRQF_TRIGGER_FALLING)
- type.polarity_low |= bit_mask_irq;
+ type.polarity_low |= BIT(irq);
+
+ flow_handler = handle_edge_irq;
} else {
if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
(flow_type & (IRQF_TRIGGER_LOW)))
return -EINVAL;
- type.type &= ~bit_mask_irq; /* level trig */
+ type.type &= ~BIT(irq); /* level trig */
if (flow_type & IRQF_TRIGGER_HIGH)
- type.polarity_high |= bit_mask_irq;
+ type.polarity_high |= BIT(irq);
else
- type.polarity_low |= bit_mask_irq;
+ type.polarity_low |= BIT(irq);
+
+ flow_handler = handle_level_irq;
}
qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
-
- if (flow_type & IRQ_TYPE_EDGE_BOTH)
- irq_set_handler_locked(d, handle_edge_irq);
- else
- irq_set_handler_locked(d, handle_level_irq);
+ irq_set_handler_locked(d, flow_handler);
return 0;
}
+static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+
+ return irq_set_irq_wake(pmic_arb->irq, on);
+}
+
static int qpnpint_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool *state)
{
- u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 irq = hwirq_to_irq(d->hwirq);
u8 status = 0;
if (which != IRQCHIP_STATE_LINE_LEVEL)
@@ -667,15 +666,34 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
return 0;
}
+static int qpnpint_irq_request_resources(struct irq_data *d)
+{
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ u16 periph = hwirq_to_per(d->hwirq);
+ u16 apid = hwirq_to_apid(d->hwirq);
+ u16 sid = hwirq_to_sid(d->hwirq);
+ u16 irq = hwirq_to_irq(d->hwirq);
+
+ if (pmic_arb->apid_data[apid].irq_ee != pmic_arb->ee) {
+ dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n",
+ sid, periph, irq, pmic_arb->ee,
+ pmic_arb->apid_data[apid].irq_ee);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static struct irq_chip pmic_arb_irqchip = {
.name = "pmic_arb",
.irq_ack = qpnpint_irq_ack,
.irq_mask = qpnpint_irq_mask,
.irq_unmask = qpnpint_irq_unmask,
.irq_set_type = qpnpint_irq_set_type,
+ .irq_set_wake = qpnpint_irq_set_wake,
.irq_get_irqchip_state = qpnpint_get_irqchip_state,
- .flags = IRQCHIP_MASK_ON_SUSPEND
- | IRQCHIP_SKIP_SET_WAKE,
+ .irq_request_resources = qpnpint_irq_request_resources,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
@@ -685,12 +703,11 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
unsigned long *out_hwirq,
unsigned int *out_type)
{
- struct spmi_pmic_arb *pa = d->host_data;
+ struct spmi_pmic_arb *pmic_arb = d->host_data;
+ u16 apid, ppid;
int rc;
- u16 apid;
- dev_dbg(&pa->spmic->dev,
- "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
+ dev_dbg(&pmic_arb->spmic->dev, "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
intspec[0], intspec[1], intspec[2]);
if (irq_domain_get_of_node(d) != controller)
@@ -700,25 +717,25 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
return -EINVAL;
- rc = pa->ver_ops->ppid_to_apid(pa, intspec[0],
- (intspec[1] << 8), &apid);
+ ppid = intspec[0] << 8 | intspec[1];
+ rc = pmic_arb->ver_ops->ppid_to_apid(pmic_arb, ppid);
if (rc < 0) {
- dev_err(&pa->spmic->dev,
- "failed to xlate sid = 0x%x, periph = 0x%x, irq = %x rc = %d\n",
+ dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u rc = %d\n",
intspec[0], intspec[1], intspec[2], rc);
return rc;
}
+ apid = rc;
/* Keep track of {max,min}_apid for bounding search during interrupt */
- if (apid > pa->max_apid)
- pa->max_apid = apid;
- if (apid < pa->min_apid)
- pa->min_apid = apid;
+ if (apid > pmic_arb->max_apid)
+ pmic_arb->max_apid = apid;
+ if (apid < pmic_arb->min_apid)
+ pmic_arb->min_apid = apid;
- *out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
+ *out_hwirq = spec_to_hwirq(intspec[0], intspec[1], intspec[2], apid);
*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
- dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
+ dev_dbg(&pmic_arb->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
return 0;
}
@@ -727,9 +744,9 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
unsigned int virq,
irq_hw_number_t hwirq)
{
- struct spmi_pmic_arb *pa = d->host_data;
+ struct spmi_pmic_arb *pmic_arb = d->host_data;
- dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
+ dev_dbg(&pmic_arb->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
@@ -737,24 +754,23 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
return 0;
}
-static int
-pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+static int pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
- u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
- u32 *mapping_table = pa->mapping_table;
+ u32 *mapping_table = pmic_arb->mapping_table;
int index = 0, i;
u16 apid_valid;
+ u16 apid;
u32 data;
- apid_valid = pa->ppid_to_apid[ppid];
- if (apid_valid & PMIC_ARB_CHAN_VALID) {
- *apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
- return 0;
+ apid_valid = pmic_arb->ppid_to_apid[ppid];
+ if (apid_valid & PMIC_ARB_APID_VALID) {
+ apid = apid_valid & ~PMIC_ARB_APID_VALID;
+ return apid;
}
for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
- if (!test_and_set_bit(index, pa->mapping_table_valid))
- mapping_table[index] = readl_relaxed(pa->cnfg +
+ if (!test_and_set_bit(index, pmic_arb->mapping_table_valid))
+ mapping_table[index] = readl_relaxed(pmic_arb->cnfg +
SPMI_MAPPING_TABLE_REG(index));
data = mapping_table[index];
@@ -763,21 +779,21 @@ pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
} else {
- *apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
- pa->ppid_to_apid[ppid]
- = *apid | PMIC_ARB_CHAN_VALID;
- pa->apid_data[*apid].ppid = ppid;
- return 0;
+ apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+ pmic_arb->ppid_to_apid[ppid]
+ = apid | PMIC_ARB_APID_VALID;
+ pmic_arb->apid_data[apid].ppid = ppid;
+ return apid;
}
} else {
if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
} else {
- *apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
- pa->ppid_to_apid[ppid]
- = *apid | PMIC_ARB_CHAN_VALID;
- pa->apid_data[*apid].ppid = ppid;
- return 0;
+ apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+ pmic_arb->ppid_to_apid[ppid]
+ = apid | PMIC_ARB_APID_VALID;
+ pmic_arb->apid_data[apid].ppid = ppid;
+ return apid;
}
}
}
@@ -785,105 +801,178 @@ pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
return -ENODEV;
}
-static int
-pmic_arb_mode_v1_v3(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
-{
- *mode = S_IRUSR | S_IWUSR;
- return 0;
-}
-
/* v1 offset per ee */
-static int
-pmic_arb_offset_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
+static int pmic_arb_offset_v1(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
+ enum pmic_arb_channel ch_type)
{
- *offset = 0x800 + 0x80 * pa->channel;
- return 0;
+ return 0x800 + 0x80 * pmic_arb->channel;
}
-static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
+static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
+ struct apid_data *apidd = &pmic_arb->apid_data[pmic_arb->last_apid];
u32 regval, offset;
- u16 apid;
- u16 id;
-
- /*
- * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
- * ppid_to_apid is an in-memory invert of that table.
- */
- for (apid = pa->last_apid; apid < pa->max_periph; apid++) {
- regval = readl_relaxed(pa->cnfg +
- SPMI_OWNERSHIP_TABLE_REG(apid));
- pa->apid_data[apid].owner = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+ u16 id, apid;
- offset = PMIC_ARB_REG_CHNL(apid);
- if (offset >= pa->core_size)
+ for (apid = pmic_arb->last_apid; ; apid++, apidd++) {
+ offset = pmic_arb->ver_ops->apid_map_offset(apid);
+ if (offset >= pmic_arb->core_size)
break;
- regval = readl_relaxed(pa->core + offset);
+ regval = readl_relaxed(pmic_arb->cnfg +
+ SPMI_OWNERSHIP_TABLE_REG(apid));
+ apidd->irq_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+ apidd->write_ee = apidd->irq_ee;
+
+ regval = readl_relaxed(pmic_arb->core + offset);
if (!regval)
continue;
id = (regval >> 8) & PMIC_ARB_PPID_MASK;
- pa->ppid_to_apid[id] = apid | PMIC_ARB_CHAN_VALID;
- pa->apid_data[apid].ppid = id;
+ pmic_arb->ppid_to_apid[id] = apid | PMIC_ARB_APID_VALID;
+ apidd->ppid = id;
if (id == ppid) {
- apid |= PMIC_ARB_CHAN_VALID;
+ apid |= PMIC_ARB_APID_VALID;
break;
}
}
- pa->last_apid = apid & ~PMIC_ARB_CHAN_VALID;
+ pmic_arb->last_apid = apid & ~PMIC_ARB_APID_VALID;
return apid;
}
-
-static int
-pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+static int pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
- u16 ppid = (sid << 8) | (addr >> 8);
u16 apid_valid;
- apid_valid = pa->ppid_to_apid[ppid];
- if (!(apid_valid & PMIC_ARB_CHAN_VALID))
- apid_valid = pmic_arb_find_apid(pa, ppid);
- if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+ apid_valid = pmic_arb->ppid_to_apid[ppid];
+ if (!(apid_valid & PMIC_ARB_APID_VALID))
+ apid_valid = pmic_arb_find_apid(pmic_arb, ppid);
+ if (!(apid_valid & PMIC_ARB_APID_VALID))
return -ENODEV;
- *apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+ return apid_valid & ~PMIC_ARB_APID_VALID;
+}
+
+static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
+{
+ struct apid_data *apidd = pmic_arb->apid_data;
+ struct apid_data *prev_apidd;
+ u16 i, apid, ppid;
+ bool valid, is_irq_ee;
+ u32 regval, offset;
+
+ /*
+ * In order to allow multiple EEs to write to a single PPID in arbiter
+ * version 5, there is more than one APID mapped to each PPID.
+ * The owner field for each of these mappings specifies the EE which is
+ * allowed to write to the APID. The owner of the last (highest) APID
+ * for a given PPID will receive interrupts from the PPID.
+ */
+ for (i = 0; ; i++, apidd++) {
+ offset = pmic_arb->ver_ops->apid_map_offset(i);
+ if (offset >= pmic_arb->core_size)
+ break;
+
+ regval = readl_relaxed(pmic_arb->core + offset);
+ if (!regval)
+ continue;
+ ppid = (regval >> 8) & PMIC_ARB_PPID_MASK;
+ is_irq_ee = PMIC_ARB_CHAN_IS_IRQ_OWNER(regval);
+
+ regval = readl_relaxed(pmic_arb->cnfg +
+ SPMI_OWNERSHIP_TABLE_REG(i));
+ apidd->write_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+
+ apidd->irq_ee = is_irq_ee ? apidd->write_ee : INVALID_EE;
+
+ valid = pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID;
+ apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
+ prev_apidd = &pmic_arb->apid_data[apid];
+
+ if (valid && is_irq_ee &&
+ prev_apidd->write_ee == pmic_arb->ee) {
+ /*
+ * Duplicate PPID mapping after the one for this EE;
+ * override the irq owner
+ */
+ prev_apidd->irq_ee = apidd->irq_ee;
+ } else if (!valid || is_irq_ee) {
+ /* First PPID mapping or duplicate for another EE */
+ pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
+ }
+
+ apidd->ppid = ppid;
+ pmic_arb->last_apid = i;
+ }
+
+ /* Dump the mapping table for debug purposes. */
+ dev_dbg(&pmic_arb->spmic->dev, "PPID APID Write-EE IRQ-EE\n");
+ for (ppid = 0; ppid < PMIC_ARB_MAX_PPID; ppid++) {
+ apid = pmic_arb->ppid_to_apid[ppid];
+ if (apid & PMIC_ARB_APID_VALID) {
+ apid &= ~PMIC_ARB_APID_VALID;
+ apidd = &pmic_arb->apid_data[apid];
+ dev_dbg(&pmic_arb->spmic->dev, "%#03X %3u %2u %2u\n",
+ ppid, apid, apidd->write_ee, apidd->irq_ee);
+ }
+ }
+
return 0;
}
-static int
-pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+static int pmic_arb_ppid_to_apid_v5(struct spmi_pmic_arb *pmic_arb, u16 ppid)
+{
+ if (!(pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID))
+ return -ENODEV;
+
+ return pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
+}
+
+/* v2 offset per ppid and per ee */
+static int pmic_arb_offset_v2(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
+ enum pmic_arb_channel ch_type)
{
u16 apid;
+ u16 ppid;
int rc;
- rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+ ppid = sid << 8 | ((addr >> 8) & 0xFF);
+ rc = pmic_arb_ppid_to_apid_v2(pmic_arb, ppid);
if (rc < 0)
return rc;
- *mode = 0;
- *mode |= S_IRUSR;
-
- if (pa->ee == pa->apid_data[apid].owner)
- *mode |= S_IWUSR;
- return 0;
+ apid = rc;
+ return 0x1000 * pmic_arb->ee + 0x8000 * apid;
}
-/* v2 offset per ppid and per ee */
-static int
-pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
+/*
+ * v5 offset per ee and per apid for observer channels and per apid for
+ * read/write channels.
+ */
+static int pmic_arb_offset_v5(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
+ enum pmic_arb_channel ch_type)
{
u16 apid;
int rc;
+ u32 offset = 0;
+ u16 ppid = (sid << 8) | (addr >> 8);
- rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+ rc = pmic_arb_ppid_to_apid_v5(pmic_arb, ppid);
if (rc < 0)
return rc;
- *offset = 0x1000 * pa->ee + 0x8000 * apid;
- return 0;
+ apid = rc;
+ switch (ch_type) {
+ case PMIC_ARB_CHANNEL_OBS:
+ offset = 0x10000 * pmic_arb->ee + 0x80 * apid;
+ break;
+ case PMIC_ARB_CHANNEL_RW:
+ offset = 0x10000 * apid;
+ break;
+ }
+
+ return offset;
}
static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
@@ -896,55 +985,97 @@ static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}
-static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
+static void __iomem *
+pmic_arb_owner_acc_status_v1(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
+{
+ return pmic_arb->intr + 0x20 * m + 0x4 * n;
+}
+
+static void __iomem *
+pmic_arb_owner_acc_status_v2(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
+{
+ return pmic_arb->intr + 0x100000 + 0x1000 * m + 0x4 * n;
+}
+
+static void __iomem *
+pmic_arb_owner_acc_status_v3(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
+{
+ return pmic_arb->intr + 0x200000 + 0x1000 * m + 0x4 * n;
+}
+
+static void __iomem *
+pmic_arb_owner_acc_status_v5(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
+{
+ return pmic_arb->intr + 0x10000 * m + 0x4 * n;
+}
+
+static void __iomem *
+pmic_arb_acc_enable_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
+{
+ return pmic_arb->intr + 0x200 + 0x4 * n;
+}
+
+static void __iomem *
+pmic_arb_acc_enable_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
+{
+ return pmic_arb->intr + 0x1000 * n;
+}
+
+static void __iomem *
+pmic_arb_acc_enable_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
- return 0x20 * m + 0x4 * n;
+ return pmic_arb->wr_base + 0x100 + 0x10000 * n;
}
-static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
+static void __iomem *
+pmic_arb_irq_status_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
- return 0x100000 + 0x1000 * m + 0x4 * n;
+ return pmic_arb->intr + 0x600 + 0x4 * n;
}
-static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
+static void __iomem *
+pmic_arb_irq_status_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
- return 0x200000 + 0x1000 * m + 0x4 * n;
+ return pmic_arb->intr + 0x4 + 0x1000 * n;
}
-static u32 pmic_arb_acc_enable_v1(u16 n)
+static void __iomem *
+pmic_arb_irq_status_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
- return 0x200 + 0x4 * n;
+ return pmic_arb->wr_base + 0x104 + 0x10000 * n;
}
-static u32 pmic_arb_acc_enable_v2(u16 n)
+static void __iomem *
+pmic_arb_irq_clear_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
- return 0x1000 * n;
+ return pmic_arb->intr + 0xA00 + 0x4 * n;
}
-static u32 pmic_arb_irq_status_v1(u16 n)
+static void __iomem *
+pmic_arb_irq_clear_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
- return 0x600 + 0x4 * n;
+ return pmic_arb->intr + 0x8 + 0x1000 * n;
}
-static u32 pmic_arb_irq_status_v2(u16 n)
+static void __iomem *
+pmic_arb_irq_clear_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
- return 0x4 + 0x1000 * n;
+ return pmic_arb->wr_base + 0x108 + 0x10000 * n;
}
-static u32 pmic_arb_irq_clear_v1(u16 n)
+static u32 pmic_arb_apid_map_offset_v2(u16 n)
{
- return 0xA00 + 0x4 * n;
+ return 0x800 + 0x4 * n;
}
-static u32 pmic_arb_irq_clear_v2(u16 n)
+static u32 pmic_arb_apid_map_offset_v5(u16 n)
{
- return 0x8 + 0x1000 * n;
+ return 0x900 + 0x4 * n;
}
static const struct pmic_arb_ver_ops pmic_arb_v1 = {
.ver_str = "v1",
.ppid_to_apid = pmic_arb_ppid_to_apid_v1,
- .mode = pmic_arb_mode_v1_v3,
.non_data_cmd = pmic_arb_non_data_cmd_v1,
.offset = pmic_arb_offset_v1,
.fmt_cmd = pmic_arb_fmt_cmd_v1,
@@ -952,12 +1083,12 @@ static const struct pmic_arb_ver_ops pmic_arb_v1 = {
.acc_enable = pmic_arb_acc_enable_v1,
.irq_status = pmic_arb_irq_status_v1,
.irq_clear = pmic_arb_irq_clear_v1,
+ .apid_map_offset = pmic_arb_apid_map_offset_v2,
};
static const struct pmic_arb_ver_ops pmic_arb_v2 = {
.ver_str = "v2",
.ppid_to_apid = pmic_arb_ppid_to_apid_v2,
- .mode = pmic_arb_mode_v2,
.non_data_cmd = pmic_arb_non_data_cmd_v2,
.offset = pmic_arb_offset_v2,
.fmt_cmd = pmic_arb_fmt_cmd_v2,
@@ -965,12 +1096,12 @@ static const struct pmic_arb_ver_ops pmic_arb_v2 = {
.acc_enable = pmic_arb_acc_enable_v2,
.irq_status = pmic_arb_irq_status_v2,
.irq_clear = pmic_arb_irq_clear_v2,
+ .apid_map_offset = pmic_arb_apid_map_offset_v2,
};
static const struct pmic_arb_ver_ops pmic_arb_v3 = {
.ver_str = "v3",
.ppid_to_apid = pmic_arb_ppid_to_apid_v2,
- .mode = pmic_arb_mode_v1_v3,
.non_data_cmd = pmic_arb_non_data_cmd_v2,
.offset = pmic_arb_offset_v2,
.fmt_cmd = pmic_arb_fmt_cmd_v2,
@@ -978,6 +1109,20 @@ static const struct pmic_arb_ver_ops pmic_arb_v3 = {
.acc_enable = pmic_arb_acc_enable_v2,
.irq_status = pmic_arb_irq_status_v2,
.irq_clear = pmic_arb_irq_clear_v2,
+ .apid_map_offset = pmic_arb_apid_map_offset_v2,
+};
+
+static const struct pmic_arb_ver_ops pmic_arb_v5 = {
+ .ver_str = "v5",
+ .ppid_to_apid = pmic_arb_ppid_to_apid_v5,
+ .non_data_cmd = pmic_arb_non_data_cmd_v2,
+ .offset = pmic_arb_offset_v5,
+ .fmt_cmd = pmic_arb_fmt_cmd_v2,
+ .owner_acc_status = pmic_arb_owner_acc_status_v5,
+ .acc_enable = pmic_arb_acc_enable_v5,
+ .irq_status = pmic_arb_irq_status_v5,
+ .irq_clear = pmic_arb_irq_clear_v5,
+ .apid_map_offset = pmic_arb_apid_map_offset_v5,
};
static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
@@ -987,37 +1132,34 @@ static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
- struct spmi_pmic_arb *pa;
+ struct spmi_pmic_arb *pmic_arb;
struct spmi_controller *ctrl;
struct resource *res;
void __iomem *core;
+ u32 *mapping_table;
u32 channel, ee, hw_ver;
int err;
- ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
if (!ctrl)
return -ENOMEM;
- pa = spmi_controller_get_drvdata(ctrl);
- pa->spmic = ctrl;
+ pmic_arb = spmi_controller_get_drvdata(ctrl);
+ pmic_arb->spmic = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- pa->core_size = resource_size(res);
- if (pa->core_size <= 0x800) {
- dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n");
- err = -EINVAL;
- goto err_put_ctrl;
- }
-
core = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(core)) {
err = PTR_ERR(core);
goto err_put_ctrl;
}
- pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
- sizeof(*pa->ppid_to_apid), GFP_KERNEL);
- if (!pa->ppid_to_apid) {
+ pmic_arb->core_size = resource_size(res);
+
+ pmic_arb->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
+ sizeof(*pmic_arb->ppid_to_apid),
+ GFP_KERNEL);
+ if (!pmic_arb->ppid_to_apid) {
err = -ENOMEM;
goto err_put_ctrl;
}
@@ -1025,57 +1167,56 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
- pa->ver_ops = &pmic_arb_v1;
- pa->wr_base = core;
- pa->rd_base = core;
+ pmic_arb->ver_ops = &pmic_arb_v1;
+ pmic_arb->wr_base = core;
+ pmic_arb->rd_base = core;
} else {
- pa->core = core;
+ pmic_arb->core = core;
if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
- pa->ver_ops = &pmic_arb_v2;
+ pmic_arb->ver_ops = &pmic_arb_v2;
+ else if (hw_ver < PMIC_ARB_VERSION_V5_MIN)
+ pmic_arb->ver_ops = &pmic_arb_v3;
else
- pa->ver_ops = &pmic_arb_v3;
-
- /* the apid to ppid table starts at PMIC_ARB_REG_CHNL(0) */
- pa->max_periph = (pa->core_size - PMIC_ARB_REG_CHNL(0)) / 4;
+ pmic_arb->ver_ops = &pmic_arb_v5;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"obsrvr");
- pa->rd_base = devm_ioremap_resource(&ctrl->dev, res);
- if (IS_ERR(pa->rd_base)) {
- err = PTR_ERR(pa->rd_base);
+ pmic_arb->rd_base = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pmic_arb->rd_base)) {
+ err = PTR_ERR(pmic_arb->rd_base);
goto err_put_ctrl;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"chnls");
- pa->wr_base = devm_ioremap_resource(&ctrl->dev, res);
- if (IS_ERR(pa->wr_base)) {
- err = PTR_ERR(pa->wr_base);
+ pmic_arb->wr_base = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pmic_arb->wr_base)) {
+ err = PTR_ERR(pmic_arb->wr_base);
goto err_put_ctrl;
}
}
dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
- pa->ver_ops->ver_str, hw_ver);
+ pmic_arb->ver_ops->ver_str, hw_ver);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
- pa->intr = devm_ioremap_resource(&ctrl->dev, res);
- if (IS_ERR(pa->intr)) {
- err = PTR_ERR(pa->intr);
+ pmic_arb->intr = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pmic_arb->intr)) {
+ err = PTR_ERR(pmic_arb->intr);
goto err_put_ctrl;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
- pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
- if (IS_ERR(pa->cnfg)) {
- err = PTR_ERR(pa->cnfg);
+ pmic_arb->cnfg = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pmic_arb->cnfg)) {
+ err = PTR_ERR(pmic_arb->cnfg);
goto err_put_ctrl;
}
- pa->irq = platform_get_irq_byname(pdev, "periph_irq");
- if (pa->irq < 0) {
- err = pa->irq;
+ pmic_arb->irq = platform_get_irq_byname(pdev, "periph_irq");
+ if (pmic_arb->irq < 0) {
+ err = pmic_arb->irq;
goto err_put_ctrl;
}
@@ -1092,7 +1233,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
- pa->channel = channel;
+ pmic_arb->channel = channel;
err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
if (err) {
@@ -1106,39 +1247,47 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
- pa->ee = ee;
-
- pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
- sizeof(*pa->mapping_table), GFP_KERNEL);
- if (!pa->mapping_table) {
+ pmic_arb->ee = ee;
+ mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
+ sizeof(*mapping_table), GFP_KERNEL);
+ if (!mapping_table) {
err = -ENOMEM;
goto err_put_ctrl;
}
+ pmic_arb->mapping_table = mapping_table;
/* Initialize max_apid/min_apid to the opposite bounds, during
* the irq domain translation, we are sure to update these */
- pa->max_apid = 0;
- pa->min_apid = PMIC_ARB_MAX_PERIPHS - 1;
+ pmic_arb->max_apid = 0;
+ pmic_arb->min_apid = PMIC_ARB_MAX_PERIPHS - 1;
platform_set_drvdata(pdev, ctrl);
- raw_spin_lock_init(&pa->lock);
+ raw_spin_lock_init(&pmic_arb->lock);
ctrl->cmd = pmic_arb_cmd;
ctrl->read_cmd = pmic_arb_read_cmd;
ctrl->write_cmd = pmic_arb_write_cmd;
+ if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
+ err = pmic_arb_read_apid_map_v5(pmic_arb);
+ if (err) {
+ dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc = %d\n",
+ err);
+ goto err_put_ctrl;
+ }
+ }
+
dev_dbg(&pdev->dev, "adding irq domain\n");
- pa->domain = irq_domain_add_tree(pdev->dev.of_node,
- &pmic_arb_irq_domain_ops, pa);
- if (!pa->domain) {
+ pmic_arb->domain = irq_domain_add_tree(pdev->dev.of_node,
+ &pmic_arb_irq_domain_ops, pmic_arb);
+ if (!pmic_arb->domain) {
dev_err(&pdev->dev, "unable to create irq_domain\n");
err = -ENOMEM;
goto err_put_ctrl;
}
- irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
- enable_irq_wake(pa->irq);
-
+ irq_set_chained_handler_and_data(pmic_arb->irq, pmic_arb_chained_irq,
+ pmic_arb);
err = spmi_controller_add(ctrl);
if (err)
goto err_domain_remove;
@@ -1146,8 +1295,8 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
return 0;
err_domain_remove:
- irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
- irq_domain_remove(pa->domain);
+ irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
+ irq_domain_remove(pmic_arb->domain);
err_put_ctrl:
spmi_controller_put(ctrl);
return err;
@@ -1156,10 +1305,10 @@ err_put_ctrl:
static int spmi_pmic_arb_remove(struct platform_device *pdev)
{
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
- struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
spmi_controller_remove(ctrl);
- irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
- irq_domain_remove(pa->domain);
+ irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
+ irq_domain_remove(pmic_arb->domain);
spmi_controller_put(ctrl);
return 0;
}
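
For reference, the v5 address arithmetic above can be checked with a standalone sketch (not part of the patch). It mirrors the PPID packing and the channel-offset formulas from pmic_arb_offset_v5(); every input value below is purely illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned int sid = 2;		/* illustrative slave id */
		unsigned int addr = 0x4045;	/* illustrative register address */
		unsigned int ee = 0;		/* illustrative execution environment */
		unsigned int apid = 10;		/* illustrative APID from the mapping table */

		/* same packing as pmic_arb_offset_v5() */
		unsigned int ppid = (sid << 8) | (addr >> 8);

		unsigned int obs = 0x10000 * ee + 0x80 * apid;	/* PMIC_ARB_CHANNEL_OBS */
		unsigned int rw = 0x10000 * apid;		/* PMIC_ARB_CHANNEL_RW */

		printf("ppid=%#x obs=%#x rw=%#x\n", ppid, obs, rw);
		return 0;
	}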
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 6d23226e5f69..aa3edabc2b0f 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -466,27 +466,25 @@ static void of_spmi_register_devices(struct spmi_controller *ctrl)
struct spmi_device *sdev;
u32 reg[2];
- dev_dbg(&ctrl->dev, "adding child %s\n", node->full_name);
+ dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
err = of_property_read_u32_array(node, "reg", reg, 2);
if (err) {
dev_err(&ctrl->dev,
- "node %s err (%d) does not have 'reg' property\n",
- node->full_name, err);
+ "node %pOF err (%d) does not have 'reg' property\n",
+ node, err);
continue;
}
if (reg[1] != SPMI_USID) {
dev_err(&ctrl->dev,
- "node %s contains unsupported 'reg' entry\n",
- node->full_name);
+ "node %pOF contains unsupported 'reg' entry\n",
+ node);
continue;
}
if (reg[0] >= SPMI_MAX_SLAVE_ID) {
- dev_err(&ctrl->dev,
- "invalid usid on node %s\n",
- node->full_name);
+ dev_err(&ctrl->dev, "invalid usid on node %pOF\n", node);
continue;
}
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 69c0232a22f8..fb40dd0588b9 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -804,7 +804,7 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
req->request_type = TB_CFG_PKG_RESET;
req->response = &reply;
req->response_size = sizeof(reply);
- req->response_type = sizeof(TB_CFG_PKG_RESET);
+ req->response_type = TB_CFG_PKG_RESET;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index e9391bbd4036..53f40c57df59 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -807,11 +807,11 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
struct tb_switch *sw = tb_to_switch(dev);
u8 key[TB_SWITCH_KEY_SIZE];
ssize_t ret = count;
+ bool clear = false;
- if (count < 64)
- return -EINVAL;
-
- if (hex2bin(key, buf, sizeof(key)))
+ if (!strcmp(buf, "\n"))
+ clear = true;
+ else if (hex2bin(key, buf, sizeof(key)))
return -EINVAL;
if (mutex_lock_interruptible(&switch_lock))
@@ -821,15 +821,19 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
ret = -EBUSY;
} else {
kfree(sw->key);
- sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
- if (!sw->key)
- ret = -ENOMEM;
+ if (clear) {
+ sw->key = NULL;
+ } else {
+ sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
+ if (!sw->key)
+ ret = -ENOMEM;
+ }
}
mutex_unlock(&switch_lock);
return ret;
}
-static DEVICE_ATTR_RW(key);
+static DEVICE_ATTR(key, 0600, key_show, key_store);
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index f7b4d0f159e4..e8a5fdaee37d 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -184,7 +184,7 @@ static int udc_plat_probe(struct platform_device *pdev)
goto exit_phy;
}
- ret = extcon_get_cable_state_(udc->edev, EXTCON_USB);
+ ret = extcon_get_state(udc->edev, EXTCON_USB);
if (ret < 0) {
dev_err(dev, "Can't get cable state\n");
goto exit_extcon;
@@ -273,7 +273,7 @@ static int udc_plat_suspend(struct device *dev)
udc = dev_get_drvdata(dev);
stop_udc(udc);
- if (extcon_get_cable_state_(udc->edev, EXTCON_USB) > 0) {
+ if (extcon_get_state(udc->edev, EXTCON_USB) > 0) {
dev_dbg(udc->dev, "device -> idle\n");
stop_udc(udc);
}
@@ -303,7 +303,7 @@ static int udc_plat_resume(struct device *dev)
return ret;
}
- if (extcon_get_cable_state_(udc->edev, EXTCON_USB) > 0) {
+ if (extcon_get_state(udc->edev, EXTCON_USB) > 0) {
dev_dbg(udc->dev, "idle -> device\n");
start_udc(udc);
}
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index fd2e9da27c4b..f661695fb589 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -95,7 +95,8 @@ static struct {
struct ds1wm_data {
void __iomem *map;
- int bus_shift; /* # of shifts to calc register offsets */
+ unsigned int bus_shift; /* # of shifts to calc register offsets */
+ bool is_hw_big_endian;
struct platform_device *pdev;
const struct mfd_cell *cell;
int irq;
@@ -115,12 +116,65 @@ struct ds1wm_data {
static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
u8 val)
{
- __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift));
+ if (ds1wm_data->is_hw_big_endian) {
+ switch (ds1wm_data->bus_shift) {
+ case 0:
+ iowrite8(val, ds1wm_data->map + (reg << 0));
+ break;
+ case 1:
+ iowrite16be((u16)val, ds1wm_data->map + (reg << 1));
+ break;
+ case 2:
+ iowrite32be((u32)val, ds1wm_data->map + (reg << 2));
+ break;
+ }
+ } else {
+ switch (ds1wm_data->bus_shift) {
+ case 0:
+ iowrite8(val, ds1wm_data->map + (reg << 0));
+ break;
+ case 1:
+ iowrite16((u16)val, ds1wm_data->map + (reg << 1));
+ break;
+ case 2:
+ iowrite32((u32)val, ds1wm_data->map + (reg << 2));
+ break;
+ }
+ }
}
static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg)
{
- return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift));
+ u32 val = 0;
+
+ if (ds1wm_data->is_hw_big_endian) {
+ switch (ds1wm_data->bus_shift) {
+ case 0:
+ val = ioread8(ds1wm_data->map + (reg << 0));
+ break;
+ case 1:
+ val = ioread16be(ds1wm_data->map + (reg << 1));
+ break;
+ case 2:
+ val = ioread32be(ds1wm_data->map + (reg << 2));
+ break;
+ }
+ } else {
+ switch (ds1wm_data->bus_shift) {
+ case 0:
+ val = ioread8(ds1wm_data->map + (reg << 0));
+ break;
+ case 1:
+ val = ioread16(ds1wm_data->map + (reg << 1));
+ break;
+ case 2:
+ val = ioread32(ds1wm_data->map + (reg << 2));
+ break;
+ }
+ }
+ dev_dbg(&ds1wm_data->pdev->dev,
+ "ds1wm_read_register reg: %d, 32 bit val:%x\n", reg, val);
+ return (u8)val;
}
@@ -455,6 +509,7 @@ static int ds1wm_probe(struct platform_device *pdev)
struct ds1wm_driver_data *plat;
struct resource *res;
int ret;
+ u8 inten;
if (!pdev)
return -ENODEV;
@@ -473,9 +528,6 @@ static int ds1wm_probe(struct platform_device *pdev)
if (!ds1wm_data->map)
return -ENOMEM;
- /* calculate bus shift from mem resource */
- ds1wm_data->bus_shift = resource_size(res) >> 3;
-
ds1wm_data->pdev = pdev;
ds1wm_data->cell = mfd_get_cell(pdev);
if (!ds1wm_data->cell)
@@ -484,6 +536,26 @@ static int ds1wm_probe(struct platform_device *pdev)
if (!plat)
return -ENODEV;
+ /* how many bits to shift register number to get register offset */
+ if (plat->bus_shift > 2) {
+ dev_err(&ds1wm_data->pdev->dev,
+ "illegal bus shift %d, not written",
+ ds1wm_data->bus_shift);
+ return -EINVAL;
+ }
+
+ ds1wm_data->bus_shift = plat->bus_shift;
+ /* make sure resource has space for 8 registers */
+ if ((8 << ds1wm_data->bus_shift) > resource_size(res)) {
+ dev_err(&ds1wm_data->pdev->dev,
+ "memory resource size %d to small, should be %d\n",
+ (int)resource_size(res),
+ 8 << ds1wm_data->bus_shift);
+ return -EINVAL;
+ }
+
+ ds1wm_data->is_hw_big_endian = plat->is_hw_big_endian;
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res)
return -ENXIO;
@@ -491,15 +563,30 @@ static int ds1wm_probe(struct platform_device *pdev)
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
+ /* Mask interrupts, set IAS before claiming interrupt */
+ inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN);
+ ds1wm_write_register(ds1wm_data,
+ DS1WM_INT_EN, ds1wm_data->int_en_reg_none);
+
if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
if (res->flags & IORESOURCE_IRQ_LOWEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
+ if (res->flags & IORESOURCE_IRQ_HIGHLEVEL)
+ irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_LEVEL_HIGH);
+ if (res->flags & IORESOURCE_IRQ_LOWLEVEL)
+ irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_LEVEL_LOW);
ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
IRQF_SHARED, "ds1wm", ds1wm_data);
- if (ret)
+ if (ret) {
+ dev_err(&ds1wm_data->pdev->dev,
+ "devm_request_irq %d failed with errno %d\n",
+ ds1wm_data->irq,
+ ret);
+
return ret;
+ }
ds1wm_up(ds1wm_data);
@@ -509,6 +596,13 @@ static int ds1wm_probe(struct platform_device *pdev)
if (ret)
goto err;
+ dev_dbg(&ds1wm_data->pdev->dev,
+ "ds1wm: probe successful, IAS: %d, rec.delay: %d, clockrate: %d, bus-shift: %d, is Hw Big Endian: %d\n",
+ plat->active_high,
+ plat->reset_recover_delay,
+ plat->clock_rate,
+ ds1wm_data->bus_shift,
+ ds1wm_data->is_hw_big_endian);
return 0;
err:
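
The bus_shift handling above is easier to follow as an offset table. This standalone sketch (not part of the patch) evaluates reg << bus_shift exactly as ds1wm_read_register()/ds1wm_write_register() do, and shows why a 16-bit bus (bus_shift = 1) needs the 8 << bus_shift = 16 bytes that the new resource-size check in ds1wm_probe() demands:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg, bus_shift = 1;	/* illustrative: 16-bit bus */

		for (reg = 0; reg < 8; reg++)
			printf("reg %u -> byte offset %u\n", reg, reg << bus_shift);
		return 0;
	}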
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index d49681cd29af..5b3e017d9276 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -70,6 +70,8 @@ MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
+/* extra configurations - e.g. 1WS */
+static int extra_config;
/**
* Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
@@ -403,7 +405,7 @@ static u8 ds2482_w1_reset_bus(void *data)
/* If the chip did reset since detect, re-config it */
if (err & DS2482_REG_STS_RST)
ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
- ds2482_calculate_config(0x00));
+ ds2482_calculate_config(extra_config));
}
mutex_unlock(&pdev->access_lock);
@@ -429,8 +431,7 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
ds2482_wait_1wire_idle(pdev);
/* note: it seems like both SPU and APU have to be set! */
retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
- ds2482_calculate_config(DS2482_REG_CFG_SPU |
- DS2482_REG_CFG_APU));
+ ds2482_calculate_config(extra_config|DS2482_REG_CFG_SPU|DS2482_REG_CFG_APU));
ds2482_wait_1wire_idle(pdev);
}
@@ -483,7 +484,7 @@ static int ds2482_probe(struct i2c_client *client,
/* Set all config items to 0 (off) */
ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
- ds2482_calculate_config(0x00));
+ ds2482_calculate_config(extra_config));
mutex_init(&data->access_lock);
@@ -558,4 +559,7 @@ module_i2c_driver(ds2482_driver);
MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
MODULE_DESCRIPTION("DS2482 driver");
+module_param(extra_config, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,4=SPU,8=1WS");
+
MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 46ccb2fc4f60..c423bdb982bb 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1088,7 +1088,7 @@ static void ds_disconnect(struct usb_interface *intf)
kfree(dev);
}
-static struct usb_device_id ds_id_table [] = {
+static const struct usb_device_id ds_id_table[] = {
{ USB_DEVICE(0x04fa, 0x2490) },
{ },
};
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index fb68465908f2..bb4d7ed2ce55 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -65,6 +65,14 @@ config W1_SLAVE_DS2423
Say Y here if you want to use a 1-wire
counter family device (DS2423).
+config W1_SLAVE_DS2805
+ tristate "112-byte EEPROM support (DS28E05)"
+ help
+ Say Y here if you want to use a 1-wire
+ 112-byte user-programmable EEPROM (DS28E05),
+ organized as 7 pages of 16 bytes each with a 64-bit
+ unique number. Requires overdrive speed to talk to it.
+
config W1_SLAVE_DS2431
tristate "1kb EEPROM family support (DS2431)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 54c63e420302..4622d8fed362 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
+obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o
obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index 6487fb772a20..bf641a191d07 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -51,7 +51,7 @@
#define DS2438_CURRENT_MSB 0x06
#define DS2438_THRESHOLD 0x07
-int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
+static int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[2];
@@ -85,7 +85,7 @@ int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
return -1;
}
-int w1_ds2438_get_temperature(struct w1_slave *sl, int16_t *temperature)
+static int w1_ds2438_get_temperature(struct w1_slave *sl, int16_t *temperature)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
@@ -127,7 +127,7 @@ post_unlock:
return ret;
}
-int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
+static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[3];
@@ -186,7 +186,8 @@ int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
return -1;
}
-uint16_t w1_ds2438_get_voltage(struct w1_slave *sl, int adc_input, uint16_t *voltage)
+static uint16_t w1_ds2438_get_voltage(struct w1_slave *sl,
+ int adc_input, uint16_t *voltage)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
diff --git a/drivers/w1/slaves/w1_ds2805.c b/drivers/w1/slaves/w1_ds2805.c
new file mode 100644
index 000000000000..29348d283a65
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2805.c
@@ -0,0 +1,313 @@
+/*
+ * w1_ds2805 - w1 family 0d (DS28E05) driver
+ *
+ * Copyright (c) 2016 Andrew Worsley amworsley@gmail.com
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/w1.h>
+
+#define W1_EEPROM_DS2805 0x0D
+
+#define W1_F0D_EEPROM_SIZE 128
+#define W1_F0D_PAGE_BITS 3
+#define W1_F0D_PAGE_SIZE (1<<W1_F0D_PAGE_BITS)
+#define W1_F0D_PAGE_MASK 0x0F
+
+#define W1_F0D_SCRATCH_BITS 1
+#define W1_F0D_SCRATCH_SIZE (1<<W1_F0D_SCRATCH_BITS)
+#define W1_F0D_SCRATCH_MASK (W1_F0D_SCRATCH_SIZE-1)
+
+#define W1_F0D_READ_EEPROM 0xF0
+#define W1_F0D_WRITE_EEPROM 0x55
+#define W1_F0D_RELEASE 0xFF
+
+#define W1_F0D_CS_OK 0xAA /* Chip Status Ok */
+
+#define W1_F0D_TPROG_MS 16
+
+#define W1_F0D_READ_RETRIES 10
+#define W1_F0D_READ_MAXLEN W1_F0D_EEPROM_SIZE
+
+/*
+ * Check the file size bounds and adjusts count as needed.
+ * This would not be needed if the file size didn't reset to 0 after a write.
+ */
+static inline size_t w1_f0d_fix_count(loff_t off, size_t count, size_t size)
+{
+ if (off > size)
+ return 0;
+
+ if ((off + count) > size)
+ return size - off;
+
+ return count;
+}
+
+/*
+ * Read a block from W1 ROM two times and compares the results.
+ * If they are equal they are returned, otherwise the read
+ * is repeated W1_F0D_READ_RETRIES times.
+ *
+ * count must not exceed W1_F0D_READ_MAXLEN.
+ */
+static int w1_f0d_readblock(struct w1_slave *sl, int off, int count, char *buf)
+{
+ u8 wrbuf[3];
+ u8 cmp[W1_F0D_READ_MAXLEN];
+ int tries = W1_F0D_READ_RETRIES;
+
+ do {
+ wrbuf[0] = W1_F0D_READ_EEPROM;
+ wrbuf[1] = off & 0x7f;
+ wrbuf[2] = 0;
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, sizeof(wrbuf));
+ w1_read_block(sl->master, buf, count);
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, sizeof(wrbuf));
+ w1_read_block(sl->master, cmp, count);
+
+ if (!memcmp(cmp, buf, count))
+ return 0;
+ } while (--tries);
+
+ dev_err(&sl->dev, "proof reading failed %d times\n",
+ W1_F0D_READ_RETRIES);
+
+ return -1;
+}
+
+static ssize_t w1_f0d_read_bin(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int todo = count;
+
+ count = w1_f0d_fix_count(off, count, W1_F0D_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->mutex);
+
+ /* read directly from the EEPROM in chunks of W1_F0D_READ_MAXLEN */
+ while (todo > 0) {
+ int block_read;
+
+ if (todo >= W1_F0D_READ_MAXLEN)
+ block_read = W1_F0D_READ_MAXLEN;
+ else
+ block_read = todo;
+
+ if (w1_f0d_readblock(sl, off, block_read, buf) < 0) {
+ count = -EIO;
+ break;
+ }
+
+ todo -= W1_F0D_READ_MAXLEN;
+ buf += W1_F0D_READ_MAXLEN;
+ off += W1_F0D_READ_MAXLEN;
+ }
+
+ mutex_unlock(&sl->master->mutex);
+
+ return count;
+}
+
+/*
+ * Writes to the scratchpad and reads it back for verification.
+ * Then copies the scratchpad to EEPROM.
+ * The data must be aligned at W1_F0D_SCRATCH_SIZE bytes and
+ * must be W1_F0D_SCRATCH_SIZE bytes long.
+ * The master must be locked.
+ *
+ * @param sl The slave structure
+ * @param addr Address for the write
+ * @param len length must be <= (W1_F0D_PAGE_SIZE - (addr & W1_F0D_PAGE_MASK))
+ * @param data The data to write
+ * @return 0=Success -1=failure
+ */
+static int w1_f0d_write(struct w1_slave *sl, int addr, int len, const u8 *data)
+{
+ int tries = W1_F0D_READ_RETRIES;
+ u8 wrbuf[3];
+ u8 rdbuf[W1_F0D_SCRATCH_SIZE];
+ u8 cs;
+
+ if ((addr & 1) || (len != 2)) {
+ dev_err(&sl->dev, "%s: bad addr/len - addr=%#x len=%d\n",
+ __func__, addr, len);
+ return -1;
+ }
+
+retry:
+
+ /* Write the data to the scratchpad */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F0D_WRITE_EEPROM;
+ wrbuf[1] = addr & 0xff;
+ wrbuf[2] = 0xff; /* ?? from Example */
+
+ w1_write_block(sl->master, wrbuf, sizeof(wrbuf));
+ w1_write_block(sl->master, data, len);
+
+ w1_read_block(sl->master, rdbuf, sizeof(rdbuf));
+ /* Compare what was read against the data written */
+ if ((rdbuf[0] != data[0]) || (rdbuf[1] != data[1])) {
+
+ if (--tries)
+ goto retry;
+
+ dev_err(&sl->dev,
+ "could not write to eeprom, scratchpad compare failed %d times\n",
+ W1_F0D_READ_RETRIES);
+ pr_info("%s: rdbuf = %#x %#x data = %#x %#x\n",
+ __func__, rdbuf[0], rdbuf[1], data[0], data[1]);
+
+ return -1;
+ }
+
+ /* Trigger write out to EEPROM */
+ w1_write_8(sl->master, W1_F0D_RELEASE);
+
+ /* Sleep for tprog ms to wait for the write to complete */
+ msleep(W1_F0D_TPROG_MS);
+
+ /* Check CS (Command Status) == 0xAA ? */
+ cs = w1_read_8(sl->master);
+ if (cs != W1_F0D_CS_OK) {
+ dev_err(&sl->dev, "save to eeprom failed = CS=%#x\n", cs);
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t w1_f0d_write_bin(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int addr, len;
+ int copy;
+
+ count = w1_f0d_fix_count(off, count, W1_F0D_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->mutex);
+
+ /* Can only write data in blocks of the size of the scratchpad */
+ addr = off;
+ len = count;
+ while (len > 0) {
+
+ /* if len too short or addr not aligned */
+ if (len < W1_F0D_SCRATCH_SIZE || addr & W1_F0D_SCRATCH_MASK) {
+ char tmp[W1_F0D_SCRATCH_SIZE];
+
+ /* read the block and update the parts to be written */
+ if (w1_f0d_readblock(sl, addr & ~W1_F0D_SCRATCH_MASK,
+ W1_F0D_SCRATCH_SIZE, tmp)) {
+ count = -EIO;
+ goto out_up;
+ }
+
+ /* copy at most to the boundary of the PAGE or len */
+ copy = W1_F0D_SCRATCH_SIZE -
+ (addr & W1_F0D_SCRATCH_MASK);
+
+ if (copy > len)
+ copy = len;
+
+ memcpy(&tmp[addr & W1_F0D_SCRATCH_MASK], buf, copy);
+ if (w1_f0d_write(sl, addr & ~W1_F0D_SCRATCH_MASK,
+ W1_F0D_SCRATCH_SIZE, tmp) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ } else {
+
+ copy = W1_F0D_SCRATCH_SIZE;
+ if (w1_f0d_write(sl, addr, copy, buf) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ }
+ buf += copy;
+ addr += copy;
+ len -= copy;
+ }
+
+out_up:
+ mutex_unlock(&sl->master->mutex);
+
+ return count;
+}
+
+static struct bin_attribute w1_f0d_bin_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = W1_F0D_EEPROM_SIZE,
+ .read = w1_f0d_read_bin,
+ .write = w1_f0d_write_bin,
+};
+
+static int w1_f0d_add_slave(struct w1_slave *sl)
+{
+ return sysfs_create_bin_file(&sl->dev.kobj, &w1_f0d_bin_attr);
+}
+
+static void w1_f0d_remove_slave(struct w1_slave *sl)
+{
+ sysfs_remove_bin_file(&sl->dev.kobj, &w1_f0d_bin_attr);
+}
+
+static struct w1_family_ops w1_f0d_fops = {
+ .add_slave = w1_f0d_add_slave,
+ .remove_slave = w1_f0d_remove_slave,
+};
+
+static struct w1_family w1_family_2d = {
+ .fid = W1_EEPROM_DS2805,
+ .fops = &w1_f0d_fops,
+};
+
+static int __init w1_f0d_init(void)
+{
+ pr_info("%s()\n", __func__);
+ return w1_register_family(&w1_family_2d);
+}
+
+static void __exit w1_f0d_fini(void)
+{
+ pr_info("%s()\n", __func__);
+ w1_unregister_family(&w1_family_2d);
+}
+
+module_init(w1_f0d_init);
+module_exit(w1_f0d_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Worsley amworsley@gmail.com");
+MODULE_DESCRIPTION("w1 family 0d driver for DS2805, 1kb EEPROM");
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index cb3fc3c6b0d1..259525c3382a 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/hwmon.h>
#include <linux/w1.h>
@@ -59,6 +60,12 @@ struct w1_therm_family_data {
atomic_t refcnt;
};
+struct therm_info {
+ u8 rom[9];
+ u8 crc;
+ u8 verdict;
+};
+
/* return the address of the refcnt in the family data */
#define THERM_REFCNT(family_data) \
(&((struct w1_therm_family_data *)family_data)->refcnt)
@@ -107,19 +114,72 @@ static struct attribute *w1_ds28ea00_attrs[] = {
&dev_attr_w1_seq.attr,
NULL,
};
+
ATTRIBUTE_GROUPS(w1_therm);
ATTRIBUTE_GROUPS(w1_ds28ea00);
+#if IS_REACHABLE(CONFIG_HWMON)
+static int w1_read_temp(struct device *dev, u32 attr, int channel,
+ long *val);
+
+static umode_t w1_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return attr == hwmon_temp_input ? 0444 : 0;
+}
+
+static int w1_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return w1_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const u32 w1_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info w1_temp = {
+ .type = hwmon_temp,
+ .config = w1_temp_config,
+};
+
+static const struct hwmon_channel_info *w1_info[] = {
+ &w1_temp,
+ NULL
+};
+
+static const struct hwmon_ops w1_hwmon_ops = {
+ .is_visible = w1_is_visible,
+ .read = w1_read,
+};
+
+static const struct hwmon_chip_info w1_chip_info = {
+ .ops = &w1_hwmon_ops,
+ .info = w1_info,
+};
+#define W1_CHIPINFO (&w1_chip_info)
+#else
+#define W1_CHIPINFO NULL
+#endif
+
static struct w1_family_ops w1_therm_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_therm_groups,
+ .chip_info = W1_CHIPINFO,
};
static struct w1_family_ops w1_ds28ea00_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_ds28ea00_groups,
+ .chip_info = W1_CHIPINFO,
};
static struct w1_family w1_therm_family_DS18S20 = {
@@ -422,33 +482,31 @@ static ssize_t w1_slave_store(struct device *device,
return ret ? : size;
}
-static ssize_t w1_slave_show(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t read_therm(struct device *device,
+ struct w1_slave *sl, struct therm_info *info)
{
- struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
- u8 rom[9], crc, verdict, external_power;
- int i, ret, max_trying = 10;
- ssize_t c = PAGE_SIZE;
+ u8 external_power;
+ int ret, max_trying = 10;
u8 *family_data = sl->family_data;
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret != 0)
- goto post_unlock;
+ goto error;
- if (!sl->family_data) {
+ if (!family_data) {
ret = -ENODEV;
- goto pre_unlock;
+ goto mt_unlock;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
- memset(rom, 0, sizeof(rom));
+ memset(info->rom, 0, sizeof(info->rom));
while (max_trying--) {
- verdict = 0;
- crc = 0;
+ info->verdict = 0;
+ info->crc = 0;
if (!w1_reset_select_slave(sl)) {
int count = 0;
@@ -474,47 +532,69 @@ static ssize_t w1_slave_show(struct device *device,
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto post_unlock;
+ goto dec_refcnt;
}
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret != 0)
- goto post_unlock;
+ goto dec_refcnt;
} else if (!w1_strong_pullup) {
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto pre_unlock;
+ goto dec_refcnt;
}
}
if (!w1_reset_select_slave(sl)) {
w1_write_8(dev, W1_READ_SCRATCHPAD);
- count = w1_read_block(dev, rom, 9);
+ count = w1_read_block(dev, info->rom, 9);
if (count != 9) {
dev_warn(device, "w1_read_block() "
"returned %u instead of 9.\n",
count);
}
- crc = w1_calc_crc8(rom, 8);
+ info->crc = w1_calc_crc8(info->rom, 8);
- if (rom[8] == crc)
- verdict = 1;
+ if (info->rom[8] == info->crc)
+ info->verdict = 1;
}
}
- if (verdict)
+ if (info->verdict)
break;
}
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(family_data));
+mt_unlock:
+ mutex_unlock(&dev->bus_mutex);
+error:
+ return ret;
+}
+
+static ssize_t w1_slave_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct therm_info info;
+ u8 *family_data = sl->family_data;
+ int ret, i;
+ ssize_t c = PAGE_SIZE;
+ u8 fid = sl->family->fid;
+
+ ret = read_therm(device, sl, &info);
+ if (ret)
+ return ret;
+
for (i = 0; i < 9; ++i)
- c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", rom[i]);
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", info.rom[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
- crc, (verdict) ? "YES" : "NO");
- if (verdict)
- memcpy(family_data, rom, sizeof(rom));
+ info.crc, (info.verdict) ? "YES" : "NO");
+ if (info.verdict)
+ memcpy(family_data, info.rom, sizeof(info.rom));
else
dev_warn(device, "Read failed CRC check\n");
@@ -523,16 +603,42 @@ static ssize_t w1_slave_show(struct device *device,
((u8 *)family_data)[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
- w1_convert_temp(rom, sl->family->fid));
+ w1_convert_temp(info.rom, fid));
ret = PAGE_SIZE - c;
+ return ret;
+}
-pre_unlock:
- mutex_unlock(&dev->bus_mutex);
+#if IS_REACHABLE(CONFIG_HWMON)
+static int w1_read_temp(struct device *device, u32 attr, int channel,
+ long *val)
+{
+ struct w1_slave *sl = dev_get_drvdata(device);
+ struct therm_info info;
+ u8 fid = sl->family->fid;
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = read_therm(device, sl, &info);
+ if (ret)
+ return ret;
+
+ if (!info.verdict) {
+ ret = -EIO;
+ return ret;
+ }
+
+ *val = w1_convert_temp(info.rom, fid);
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
-post_unlock:
- atomic_dec(THERM_REFCNT(family_data));
return ret;
}
+#endif
#define W1_42_CHAIN 0x99
#define W1_42_CHAIN_OFF 0x3C
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 74471e7aa5cc..0c2a5a8327bd 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -25,6 +25,7 @@
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/hwmon.h>
#include <linux/atomic.h>
@@ -568,7 +569,7 @@ static struct attribute *w1_master_default_attrs[] = {
NULL
};
-static struct attribute_group w1_master_defattr_group = {
+static const struct attribute_group w1_master_defattr_group = {
.attrs = w1_master_default_attrs,
};
@@ -649,9 +650,24 @@ static int w1_family_notify(unsigned long action, struct w1_slave *sl)
return err;
}
}
-
+ if (IS_REACHABLE(CONFIG_HWMON) && fops->chip_info) {
+ struct device *hwmon
+ = hwmon_device_register_with_info(&sl->dev,
+ "w1_slave_temp", sl,
+ fops->chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(&sl->dev,
+ "could not create hwmon device\n");
+ } else {
+ sl->hwmon = hwmon;
+ }
+ }
break;
case BUS_NOTIFY_DEL_DEVICE:
+ if (IS_REACHABLE(CONFIG_HWMON) && fops->chip_info &&
+ sl->hwmon)
+ hwmon_device_unregister(sl->hwmon);
if (fops->remove_slave)
sl->family->fops->remove_slave(sl);
if (fops->groups)
@@ -729,6 +745,8 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
atomic_set(&sl->refcnt, 1);
atomic_inc(&sl->master->refcnt);
dev->slave_count++;
+ dev_info(&dev->dev, "Attaching one wire slave %02x.%012llx crc %02x\n",
+ rn->family, (unsigned long long)rn->id, rn->crc);
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index fb8507f521b2..ebcc8fb3fa66 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -28,6 +28,8 @@ static struct kobj_map *cdev_map;
static DEFINE_MUTEX(chrdevs_lock);
+#define CHRDEV_MAJOR_HASH_SIZE 255
+
static struct char_device_struct {
struct char_device_struct *next;
unsigned int major;
@@ -49,16 +51,39 @@ void chrdev_show(struct seq_file *f, off_t offset)
{
struct char_device_struct *cd;
- if (offset < CHRDEV_MAJOR_HASH_SIZE) {
- mutex_lock(&chrdevs_lock);
- for (cd = chrdevs[offset]; cd; cd = cd->next)
+ mutex_lock(&chrdevs_lock);
+ for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
+ if (cd->major == offset)
seq_printf(f, "%3d %s\n", cd->major, cd->name);
- mutex_unlock(&chrdevs_lock);
}
+ mutex_unlock(&chrdevs_lock);
}
#endif /* CONFIG_PROC_FS */
+static int find_dynamic_major(void)
+{
+ int i;
+ struct char_device_struct *cd;
+
+ for (i = ARRAY_SIZE(chrdevs)-1; i > CHRDEV_MAJOR_DYN_END; i--) {
+ if (chrdevs[i] == NULL)
+ return i;
+ }
+
+ for (i = CHRDEV_MAJOR_DYN_EXT_START;
+ i > CHRDEV_MAJOR_DYN_EXT_END; i--) {
+ for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
+ if (cd->major == i)
+ break;
+
+ if (cd == NULL || cd->major != i)
+ return i;
+ }
+
+ return -EBUSY;
+}
+
/*
* Register a single major with a specified minor range.
*
@@ -84,22 +109,21 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
mutex_lock(&chrdevs_lock);
- /* temporary */
if (major == 0) {
- for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
- if (chrdevs[i] == NULL)
- break;
- }
-
- if (i < CHRDEV_MAJOR_DYN_END)
- pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range\n",
- name, i);
-
- if (i == 0) {
- ret = -EBUSY;
+ ret = find_dynamic_major();
+ if (ret < 0) {
+ pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
+ name);
goto out;
}
- major = i;
+ major = ret;
+ }
+
+ if (major >= CHRDEV_MAJOR_MAX) {
+ pr_err("CHRDEV \"%s\" major requested (%d) is greater than the maximum (%d)\n",
+ name, major, CHRDEV_MAJOR_MAX);
+ ret = -EINVAL;
+ goto out;
}
cd->major = major;
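
find_dynamic_major() first scans the direct slots downward from 254, then falls back to the extended range, where several majors alias the same hash bucket and the chain must be walked with an explicit cd->major comparison. major_to_index() is outside this hunk; assuming the usual modulo hash, the aliasing looks like this standalone sketch:

	#include <stdio.h>

	#define CHRDEV_MAJOR_HASH_SIZE 255

	/* assumed shape of major_to_index(); not shown in this diff */
	static unsigned int major_to_index(unsigned int major)
	{
		return major % CHRDEV_MAJOR_HASH_SIZE;
	}

	int main(void)
	{
		/* an extended-range major shares its bucket with a low one */
		printf("major 260 -> bucket %u\n", major_to_index(260));	/* 5 */
		printf("major 5   -> bucket %u\n", major_to_index(5));	/* 5 */
		return 0;
	}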
diff --git a/fs/proc/devices.c b/fs/proc/devices.c
index 50493edc30e5..e5709343feb7 100644
--- a/fs/proc/devices.c
+++ b/fs/proc/devices.c
@@ -7,14 +7,14 @@ static int devinfo_show(struct seq_file *f, void *v)
{
int i = *(loff_t *) v;
- if (i < CHRDEV_MAJOR_HASH_SIZE) {
+ if (i < CHRDEV_MAJOR_MAX) {
if (i == 0)
seq_puts(f, "Character devices:\n");
chrdev_show(f, i);
}
#ifdef CONFIG_BLOCK
else {
- i -= CHRDEV_MAJOR_HASH_SIZE;
+ i -= CHRDEV_MAJOR_MAX;
if (i == 0)
seq_puts(f, "\nBlock devices:\n");
blkdev_show(f, i);
@@ -25,7 +25,7 @@ static int devinfo_show(struct seq_file *f, void *v)
static void *devinfo_start(struct seq_file *f, loff_t *pos)
{
- if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
+ if (*pos < (BLKDEV_MAJOR_MAX + CHRDEV_MAJOR_MAX))
return pos;
return NULL;
}
@@ -33,7 +33,7 @@ static void *devinfo_start(struct seq_file *f, loff_t *pos)
static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
{
(*pos)++;
- if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
+ if (*pos >= (BLKDEV_MAJOR_MAX + CHRDEV_MAJOR_MAX))
return NULL;
return pos;
}
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index fb790b8449c1..b97be27e5a85 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -29,6 +29,8 @@ static inline u32 __bitrev32(u32 x)
#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
+#define __bitrev8x4(x) (__bitrev32(swab32(x)))
+
#define __constant_bitrev32(x) \
({ \
u32 __x = x; \
@@ -50,6 +52,15 @@ static inline u32 __bitrev32(u32 x)
__x; \
})
+#define __constant_bitrev8x4(x) \
+({ \
+ u32 __x = x; \
+ __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
+ __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
+ __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
+ __x; \
+})
+
#define __constant_bitrev8(x) \
({ \
u8 __x = x; \
@@ -75,6 +86,14 @@ static inline u32 __bitrev32(u32 x)
__bitrev16(__x); \
})
+#define bitrev8x4(x) \
+({ \
+ u32 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev8x4(__x) : \
+ __bitrev8x4(__x); \
+ })
+
#define bitrev8(x) \
({ \
u8 __x = x; \
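
bitrev8x4() reverses the bit order inside each byte of a 32-bit word while keeping the byte order. The runtime form works because __bitrev32() reverses both bits and bytes, so the swab32() pre-swap cancels the byte reversal. A standalone check (not part of the patch), using the same three swap stages as __constant_bitrev8x4():

	#include <stdio.h>

	static unsigned int bitrev8x4(unsigned int x)
	{
		x = ((x & 0xF0F0F0F0u) >> 4) | ((x & 0x0F0F0F0Fu) << 4);
		x = ((x & 0xCCCCCCCCu) >> 2) | ((x & 0x33333333u) << 2);
		x = ((x & 0xAAAAAAAAu) >> 1) | ((x & 0x55555555u) << 1);
		return x;
	}

	int main(void)
	{
		/* per-byte reversal: 0x12->0x48, 0x34->0x2C, 0x56->0x6A, 0x78->0x1E */
		printf("%#010x\n", bitrev8x4(0x12345678));	/* 0x482c6a1e */
		return 0;
	}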
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 7d410260661b..edfeaba95429 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -24,6 +24,12 @@
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
+
+/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_TS 11
+#define ETM4_CFG_BIT_RETSTK 12
static inline int coresight_get_trace_id(int cpu)
{
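
The new ETM4_CFG_BIT_* values give ETMv4 code fixed TRCCONFIGR bit positions for the perf ETM_OPT_* event options above. A hedged sketch of the translation a driver might perform (the real mapping lives in the coresight etm4x driver, outside this diff):

	#include <linux/bitops.h>
	#include <linux/types.h>

	static u32 etm_opt_to_etm4_config(u64 opt)
	{
		u32 config = 0;

		if (opt & BIT(ETM_OPT_CYCACC))
			config |= BIT(ETM4_CFG_BIT_CYCACC);
		if (opt & BIT(ETM_OPT_TS))
			config |= BIT(ETM4_CFG_BIT_TS);
		if (opt & BIT(ETM_OPT_RETSTK))
			config |= BIT(ETM4_CFG_BIT_RETSTK);

		return config;
	}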
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 885f587a3555..915898759280 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -2,8 +2,7 @@
* Module: eeprom_93xx46
* platform description for 93xx46 EEPROMs.
*/
-
-struct gpio_desc;
+#include <linux/gpio/consumer.h>
struct eeprom_93xx46_platform_data {
unsigned char flags;
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7e206a9f88db..744d60ca80c3 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,5 +1,5 @@
/*
- * External connector (extcon) class driver
+ * External Connector (extcon) framework
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -20,8 +20,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
-*/
+ */
#ifndef __LINUX_EXTCON_H__
#define __LINUX_EXTCON_H__
@@ -93,7 +92,7 @@
#define EXTCON_NUM 63
/*
- * Define the property of supported external connectors.
+ * Define the properties of supported external connectors.
*
* When adding the new extcon property, they *must* have
* the type/value/default information. Also, you *have to*
@@ -176,44 +175,42 @@ struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
-/*
- * Following APIs are for notifiers or configurations.
- * Notifiers are the external port and connection devices.
- */
+/* Following APIs register/unregister the extcon device. */
extern int extcon_dev_register(struct extcon_dev *edev);
extern void extcon_dev_unregister(struct extcon_dev *edev);
extern int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev);
+ struct extcon_dev *edev);
extern void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev);
-extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+ struct extcon_dev *edev);
-/*
- * Following APIs control the memory of extcon device.
- */
+/* Following APIs allocate/free the memory of the extcon device. */
extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable);
+ const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+/* Synchronize the state and property value for each external connector. */
+extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+
/*
- * get/set_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable id.
+ * Following APIs get/set the connected state of each external connector.
+ * The 'id' argument indicates the defined external connector.
*/
extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
+ bool state);
extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
-/*
- * Synchronize the state and property data for a specific external connector.
- */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+ bool state);
/*
- * get/set_property access the property value of each external connector.
- * They are used to access the property of each cable based on the property id.
+ * Following APIs get/set the property of each external connector.
+ * The 'id' argument indicates the defined external connector
+ * and the 'prop' indicates the extcon property.
+ *
+ * And extcon_get/set_property_capability() get/set the capability of
+ * a given property for each external connector, based on the id and
+ * the property.
*/
extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -224,28 +221,24 @@ extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val);
-
-/*
- * get/set_property_capability set the capability of the property for each
- * external connector. They are used to set the capability of the property
- * of each external connector based on the id and property.
- */
extern int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
extern int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
- * Following APIs are to monitor the status change of the external connectors.
+ * Following APIs register the notifier block in order to detect
+ * the change of both state and property value for each external connector.
+ *
* extcon_register_notifier(*edev, id, *nb) : Register a notifier block
* for specific external connector of the extcon.
* extcon_register_notifier_all(*edev, *nb) : Register a notifier block
* for all supported external connectors of the extcon.
*/
extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
+ struct notifier_block *nb);
extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
+ struct notifier_block *nb);
extern int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -265,16 +258,15 @@ extern void devm_extcon_unregister_notifier_all(struct device *dev,
struct notifier_block *nb);
/*
- * Following API get the extcon device from devicetree.
- * This function use phandle of devicetree to get extcon device directly.
+ * Following APIs get the extcon_dev from the devicetree or by extcon name.
*/
+extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index);
-/* Following API to get information of extcon device */
+/* Following API get the name of extcon device. */
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
-
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -284,13 +276,13 @@ static inline int extcon_dev_register(struct extcon_dev *edev)
static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
static inline int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev)
+ struct extcon_dev *edev)
{
return -EINVAL;
}
static inline void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev) { }
+ struct extcon_dev *edev) { }
static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
@@ -300,7 +292,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
static inline void extcon_dev_free(struct extcon_dev *edev) { }
static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable)
+ const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -314,13 +306,13 @@ static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
}
static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+ bool state)
{
return 0;
}
static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+ bool state)
{
return 0;
}
@@ -331,52 +323,45 @@ static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
}
static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value *prop_val)
+ unsigned int prop,
+ union extcon_property_value *prop_val)
{
return 0;
}
static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val)
+ unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
static inline int extcon_set_property_sync(struct extcon_dev *edev,
- unsigned int id, unsigned int prop,
- union extcon_property_value prop_val)
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
static inline int extcon_get_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+ unsigned int id, unsigned int prop)
{
return 0;
}
static inline int extcon_set_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+ unsigned int id, unsigned int prop)
{
return 0;
}
-static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
-{
- return NULL;
-}
-
static inline int extcon_register_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
static inline int extcon_unregister_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
@@ -392,8 +377,13 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
- int index)
+ int index)
{
return ERR_PTR(-ENODEV);
}
@@ -411,26 +401,14 @@ struct extcon_specific_cable_nb {
};
static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
+ const char *extcon_name, const char *cable_name,
+ struct notifier_block *nb)
{
return -EINVAL;
}
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
- *obj)
+static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
{
return -EINVAL;
}
-
-static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id)
-{
- return extcon_get_state(edev, id);
-}
-
-static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
-{
- return extcon_set_state_sync(edev, id, cable_state);
-}
#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/fmc.h b/include/linux/fmc.h
index a5f0aa5c2a8d..3dc8a1b2db7b 100644
--- a/include/linux/fmc.h
+++ b/include/linux/fmc.h
@@ -132,6 +132,8 @@ struct fmc_operations {
uint32_t (*read32)(struct fmc_device *fmc, int offset);
void (*write32)(struct fmc_device *fmc, uint32_t value, int offset);
int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv);
+ int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d,
+ void *gw, unsigned long len);
int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw);
int (*irq_request)(struct fmc_device *fmc, irq_handler_t h,
char *name, int flags);
@@ -144,6 +146,8 @@ struct fmc_operations {
};
/* Prefer this helper rather than calling fmc->reprogram directly */
+int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
+ void *gw, unsigned long len, int sdb_entry);
extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw,
int sdb_entry);
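A hedged usage sketch of the new raw helper; 'fmc', 'drv' and the gateware buffer are assumed from the caller's context, and a negative sdb_entry is taken to mean no SDB scan after programming:

ret = fmc_reprogram_raw(fmc, drv, gw_buf, gw_len, -1);
if (ret)
	dev_err(&fmc->dev, "raw reprogram failed: %d\n", ret);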
@@ -180,6 +184,9 @@ struct fmc_device {
uint32_t device_id; /* Filled by the device */
char *mezzanine_name; /* Defaults to ``fmc'' */
void *mezzanine_data;
+
+ struct dentry *dbg_dir;
+ struct dentry *dbg_sdb_dump;
};
#define to_fmc_device(x) container_of((x), struct fmc_device, dev)
@@ -217,14 +224,23 @@ static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data)
dev_set_drvdata(&fmc->dev, data);
}
-/* The 4 access points */
+struct fmc_gateware {
+ void *bitstream;
+ unsigned long len;
+};
+
+/* The 5 access points */
extern int fmc_driver_register(struct fmc_driver *drv);
extern void fmc_driver_unregister(struct fmc_driver *drv);
extern int fmc_device_register(struct fmc_device *tdev);
+extern int fmc_device_register_gw(struct fmc_device *tdev,
+ struct fmc_gateware *gw);
extern void fmc_device_unregister(struct fmc_device *tdev);
-/* Two more for device sets, all driven by the same FPGA */
+/* Three more for device sets, all driven by the same FPGA */
extern int fmc_device_register_n(struct fmc_device **devs, int n);
+extern int fmc_device_register_n_gw(struct fmc_device **devs, int n,
+ struct fmc_gateware *gw);
extern void fmc_device_unregister_n(struct fmc_device **devs, int n);
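A minimal sketch of the new *_gw registration, assuming the carrier already holds the bitstream in memory; my_fmc, my_bitstream and my_bitstream_len are hypothetical:

struct fmc_gateware gw = {
	.bitstream = my_bitstream,
	.len	   = my_bitstream_len,
};

ret = fmc_device_register_gw(my_fmc, &gw);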
/* Internal cross-calls between files; not exported to other modules */
@@ -232,6 +248,23 @@ extern int fmc_match(struct device *dev, struct device_driver *drv);
extern int fmc_fill_id_info(struct fmc_device *fmc);
extern void fmc_free_id_info(struct fmc_device *fmc);
extern void fmc_dump_eeprom(const struct fmc_device *fmc);
-extern void fmc_dump_sdb(const struct fmc_device *fmc);
+
+/* helpers for FMC operations */
+extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
+ char *name, int flags);
+extern void fmc_irq_free(struct fmc_device *fmc);
+extern void fmc_irq_ack(struct fmc_device *fmc);
+extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv);
+extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio,
+ int ngpio);
+extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l);
+extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l);
+
#endif /* __LINUX_FMC_H__ */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index b4ac24c4411d..bfa14bc023fb 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -67,10 +67,14 @@ enum fpga_mgr_states {
* FPGA Manager flags
* FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
* FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
+#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3)
+#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
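A sketch of how a caller might combine the new flags when filling struct fpga_image_info (the flags field is assumed per the struct documented below):

struct fpga_image_info info = {
	/* stream LSB first and tell the manager the image is compressed */
	.flags = FPGA_MGR_BITSTREAM_LSB_FIRST | FPGA_MGR_COMPRESSED_BITSTREAM,
};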
/**
* struct fpga_image_info - information specific to a FPGA image
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2625fc47c7e5..cc2e0f5a8fd1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2473,9 +2473,13 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
#endif
/* fs/char_dev.c */
-#define CHRDEV_MAJOR_HASH_SIZE 255
+#define CHRDEV_MAJOR_MAX 512
/* Marks the bottom of the first segment of free char majors */
#define CHRDEV_MAJOR_DYN_END 234
+/* Marks the top and bottom of the second segment of free char majors */
+#define CHRDEV_MAJOR_DYN_EXT_START 511
+#define CHRDEV_MAJOR_DYN_EXT_END 384
+
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
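A sketch of the dynamic path these macros serve: passing major == 0 asks the kernel to pick a free major, first below CHRDEV_MAJOR_DYN_END and, once that range is exhausted, from the new 384..511 extended range; "mydrv" and my_fops are hypothetical:

int major = __register_chrdev(0, 0, 256, "mydrv", &my_fops);

if (major < 0)
	return major;	/* no free major available */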
@@ -2502,14 +2506,14 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_HASH_SIZE 255
+#define BLKDEV_MAJOR_MAX 512
extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern void blkdev_show(struct seq_file *,off_t);
#else
-#define BLKDEV_MAJOR_HASH_SIZE 0
+#define BLKDEV_MAJOR_MAX 0
#endif
extern void init_special_inode(struct inode *, umode_t, dev_t);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b7d7bbec74e0..07650d0232cc 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -124,10 +124,7 @@ struct hv_ring_buffer_info {
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
- u32 ring_data_startoffset;
- u32 priv_write_index;
u32 priv_read_index;
- u32 cached_read_index;
};
/*
@@ -180,19 +177,6 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}
-static inline u32 hv_get_cached_bytes_to_write(
- const struct hv_ring_buffer_info *rbi)
-{
- u32 read_loc, write_loc, dsize, write;
-
- dsize = rbi->ring_datasize;
- read_loc = rbi->cached_read_index;
- write_loc = rbi->ring_buffer->write_index;
-
- write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- return write;
-}
/*
* VMBUS version is 32 bit entity broken up into
 * two 16 bit quantities: major_number and minor_number.
@@ -895,6 +879,8 @@ struct vmbus_channel {
*/
enum hv_numa_policy affinity_policy;
+ bool probe_done;
+
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -1474,55 +1460,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
}
/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- * producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- * the consumer of the ring will signal when the ring
- * state transitions from being full to a state where
- * there is room for the producer to send the pending packet.
- */
-
-static inline void hv_signal_on_read(struct vmbus_channel *channel)
-{
- u32 cur_write_sz, cached_write_sz;
- u32 pending_sz;
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /*
- * Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
- * read index, we could miss sending the interrupt. Issue a full
- * memory barrier to address this.
- */
- virt_mb();
-
- pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
- /* If the other end is not blocked on write don't bother. */
- if (pending_sz == 0)
- return;
-
- cur_write_sz = hv_get_bytes_to_write(rbi);
-
- if (cur_write_sz < pending_sz)
- return;
-
- cached_write_sz = hv_get_cached_bytes_to_write(rbi);
- if (cached_write_sz < pending_sz)
- vmbus_setevent(channel);
-}
-
-/*
* Mask off host interrupt callback notifications
*/
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 190c8f4afa02..2b16e95b9bb8 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -285,6 +285,11 @@ enum host_event_code {
EC_HOST_EVENT_HANG_DETECT = 20,
/* Hang detect logic detected a hang and warm rebooted the AP */
EC_HOST_EVENT_HANG_REBOOT = 21,
+ /* PD MCU triggering host event */
+ EC_HOST_EVENT_PD_MCU = 22,
+
+ /* EC desires to change state of host-controlled USB mux */
+ EC_HOST_EVENT_USB_MUX = 28,
/*
* The high bit of the event mask is not used as a host event code. If
@@ -2905,6 +2910,76 @@ struct ec_params_usb_pd_control {
uint8_t mux;
} __packed;
+#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
+#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
+#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
+
+struct ec_response_usb_pd_control_v1 {
+ uint8_t enabled;
+ uint8_t role;
+ uint8_t polarity;
+ char state[32];
+} __packed;
+
+#define EC_CMD_USB_PD_PORTS 0x102
+
+struct ec_response_usb_pd_ports {
+ uint8_t num_ports;
+} __packed;
+
+#define EC_CMD_USB_PD_POWER_INFO 0x103
+
+#define PD_POWER_CHARGING_PORT 0xff
+struct ec_params_usb_pd_power_info {
+ uint8_t port;
+} __packed;
+
+enum usb_chg_type {
+ USB_CHG_TYPE_NONE,
+ USB_CHG_TYPE_PD,
+ USB_CHG_TYPE_C,
+ USB_CHG_TYPE_PROPRIETARY,
+ USB_CHG_TYPE_BC12_DCP,
+ USB_CHG_TYPE_BC12_CDP,
+ USB_CHG_TYPE_BC12_SDP,
+ USB_CHG_TYPE_OTHER,
+ USB_CHG_TYPE_VBUS,
+ USB_CHG_TYPE_UNKNOWN,
+};
+
+struct usb_chg_measures {
+ uint16_t voltage_max;
+ uint16_t voltage_now;
+ uint16_t current_max;
+ uint16_t current_lim;
+} __packed;
+
+struct ec_response_usb_pd_power_info {
+ uint8_t role;
+ uint8_t type;
+ uint8_t dualrole;
+ uint8_t reserved1;
+ struct usb_chg_measures meas;
+ uint32_t max_power;
+} __packed;
+
+/* Get info about USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_INFO 0x11a
+
+struct ec_params_usb_pd_mux_info {
+ uint8_t port; /* USB-C port number */
+} __packed;
+
+/* Flags representing mux state */
+#define USB_PD_MUX_USB_ENABLED (1 << 0)
+#define USB_PD_MUX_DP_ENABLED (1 << 1)
+#define USB_PD_MUX_POLARITY_INVERTED (1 << 2)
+#define USB_PD_MUX_HPD_IRQ (1 << 3)
+
+struct ec_response_usb_pd_mux_info {
+ uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
+} __packed;
+
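A hedged sketch of driving EC_CMD_USB_PD_MUX_INFO from a kernel consumer, modelled on what the new extcon-usbc-cros-ec driver does; 'ec' (a struct cros_ec_device *) and 'dev' are assumed to come from the MFD parent:

struct ec_params_usb_pd_mux_info params = { .port = 0 };
struct ec_response_usb_pd_mux_info resp;
struct cros_ec_command *msg;
int ret;

msg = kzalloc(sizeof(*msg) + max(sizeof(params), sizeof(resp)), GFP_KERNEL);
if (!msg)
	return -ENOMEM;

msg->command = EC_CMD_USB_PD_MUX_INFO;
msg->outsize = sizeof(params);
msg->insize  = sizeof(resp);
memcpy(msg->data, &params, sizeof(params));

ret = cros_ec_cmd_xfer(ec, msg);
if (ret >= 0) {
	memcpy(&resp, msg->data, sizeof(resp));
	if (resp.flags & USB_PD_MUX_DP_ENABLED)
		dev_info(dev, "DP alt mode active on port 0\n");
}
kfree(msg);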
/*****************************************************************************/
/*
* Passthru commands
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
index 38a372a0e285..2227c6a75d84 100644
--- a/include/linux/mfd/ds1wm.h
+++ b/include/linux/mfd/ds1wm.h
@@ -1,13 +1,28 @@
-/* MFD cell driver data for the DS1WM driver */
+/* MFD cell driver data for the DS1WM driver
+ *
+ * to be defined by the MFD device that is
+ * using this driver for one of its sub-devices
+ */
struct ds1wm_driver_data {
int active_high;
int clock_rate;
- /* in milliseconds, the amount of time to */
- /* sleep following a reset pulse. Zero */
- /* should work if your bus devices recover*/
- /* time respects the 1-wire spec since the*/
- /* ds1wm implements the precise timings of*/
- /* a reset pulse/presence detect sequence.*/
+ /* in milliseconds, the amount of time to
+ * sleep following a reset pulse. Zero
+ * should work if your bus devices' recovery
+ * time respects the 1-wire spec, since the
+ * ds1wm implements the precise timings of
+ * a reset pulse/presence detect sequence.
+ */
unsigned int reset_recover_delay;
+
+ /* Say 1 here for big-endian hardware
+ * (only relevant with bus_shift > 0)
+ */
+ bool is_hw_big_endian;
+
+ /* left shift of register number to get the register address offset.
+ * Only 0, 1 or 2 allowed, for 8, 16 or 32 bit bus widths respectively
+ */
+ unsigned int bus_shift;
};
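An illustrative platform-data sketch for a DS1WM cell sitting on a 16-bit, big-endian bus; the values are examples only:

static struct ds1wm_driver_data ds1wm_pdata = {
	.clock_rate		= 12000000,
	.reset_recover_delay	= 1,	/* ms */
	.is_hw_big_endian	= true,
	.bus_shift		= 1,	/* registers 16 bits apart */
};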
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5577e1b773c4..ea96d4c82be7 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -13,6 +13,8 @@
#ifndef _LINUX_MUX_CONSUMER_H
#define _LINUX_MUX_CONSUMER_H
+#include <linux/compiler.h>
+
struct device;
struct mux_control;
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index c2256d746543..4e85447f7860 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -12,6 +12,9 @@
#ifndef _LINUX_NVMEM_CONSUMER_H
#define _LINUX_NVMEM_CONSUMER_H
+#include <linux/err.h>
+#include <linux/errno.h>
+
struct device;
struct device_node;
/* consumer cookie */
@@ -35,6 +38,7 @@ void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
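A minimal consumer sketch of the new helper; the cell name "calib" is hypothetical and would come from the consumer's DT node:

u32 calib;
int ret = nvmem_cell_read_u32(dev, "calib", &calib);

if (ret)
	return ret;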
/* direct nvmem device read/write interface */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
@@ -85,6 +89,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -ENOSYS;
}
+static inline int nvmem_cell_read_u32(struct device *dev,
+ const char *cell_id, u32 *val)
+{
+ return -ENOSYS;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 90cbe7e65059..5b2972946dda 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -68,6 +68,7 @@ struct w1_reg_num {
* @family: module for device family type
* @family_data: pointer for use by the family module
* @dev: kernel device identifier
+ * @hwmon: pointer to hwmon device
*
*/
struct w1_slave {
@@ -83,6 +84,7 @@ struct w1_slave {
struct w1_family *family;
void *family_data;
struct device dev;
+ struct device *hwmon;
};
typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
@@ -250,11 +252,13 @@ void w1_remove_master_device(struct w1_bus_master *master);
* @add_slave: add_slave
* @remove_slave: remove_slave
* @groups: sysfs group
+ * @chip_info: pointer to struct hwmon_chip_info
*/
struct w1_family_ops {
int (*add_slave)(struct w1_slave *sl);
void (*remove_slave)(struct w1_slave *sl);
const struct attribute_group **groups;
+ const struct hwmon_chip_info *chip_info;
};
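A sketch of a family wiring in the new hook; my_add_slave(), my_remove_slave() and my_chip_info are hypothetical and would be defined by the slave family driver:

static struct w1_family_ops my_fops = {
	.add_slave	= my_add_slave,
	.remove_slave	= my_remove_slave,
	.chip_info	= &my_chip_info,	/* hwmon channels for this family */
};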
/**
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 51f891fb1b18..84a9a0944e13 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -132,6 +132,7 @@ enum {
/* struct binder_fd_array_object - object describing an array of fds in a buffer
* @hdr: common header structure
+ * @pad: padding to ensure correct alignment
* @num_fds: number of file descriptors in the buffer
* @parent: index in offset array to buffer holding the fd array
* @parent_offset: start offset of fd array in the buffer
@@ -152,6 +153,7 @@ enum {
*/
struct binder_fd_array_object {
struct binder_object_header hdr;
+ __u32 pad;
binder_size_t num_fds;
binder_size_t parent;
binder_size_t parent_offset;
@@ -184,6 +186,19 @@ struct binder_version {
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO: the driver reads ptr and writes to
+ * all fields. Set ptr to 0 for the first call to get the info for the first
+ * node, then repeat the call, passing the previously returned value, to get
+ * the next nodes. ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+ __u32 has_strong_ref;
+ __u32 has_weak_ref;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -191,6 +206,7 @@ struct binder_version {
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
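A user-space sketch of the iteration protocol described above; 'fd' is assumed to be an open binder device descriptor:

struct binder_node_debug_info info = { .ptr = 0 };

for (;;) {
	if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
		break;
	if (info.ptr == 0)
		break;	/* no more nodes */
	printf("node %llx: strong=%u weak=%u\n",
	       (unsigned long long)info.ptr,
	       info.has_strong_ref, info.has_weak_ref);
}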
/*
* NOTE: Two special error codes you should check for when calling
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index d70829033bb7..d3fd428f4b92 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -10,6 +10,7 @@
# CONFIG_USELIB is not set
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_ASHMEM=y
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 26ae609a9448..457a1521f32f 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -138,14 +138,17 @@ void print_usage(char *argv[])
int main(int argc, char *argv[])
{
- int fcopy_fd, len;
+ int fcopy_fd;
int error;
int daemonize = 1, long_index = 0, opt;
int version = FCOPY_CURRENT_VERSION;
- char *buffer[4096 * 2];
- struct hv_fcopy_hdr *in_msg;
+ union {
+ struct hv_fcopy_hdr hdr;
+ struct hv_start_fcopy start;
+ struct hv_do_fcopy copy;
+ __u32 kernel_modver;
+ } buffer = { };
int in_handshake = 1;
- __u32 kernel_modver;
static struct option long_options[] = {
{"help", no_argument, 0, 'h' },
@@ -195,32 +198,31 @@ int main(int argc, char *argv[])
* In this loop we process fcopy messages after the
* handshake is complete.
*/
- len = pread(fcopy_fd, buffer, (4096 * 2), 0);
+ ssize_t len;
+
+ len = pread(fcopy_fd, &buffer, sizeof(buffer), 0);
if (len < 0) {
syslog(LOG_ERR, "pread failed: %s", strerror(errno));
exit(EXIT_FAILURE);
}
if (in_handshake) {
- if (len != sizeof(kernel_modver)) {
+ if (len != sizeof(buffer.kernel_modver)) {
syslog(LOG_ERR, "invalid version negotiation");
exit(EXIT_FAILURE);
}
- kernel_modver = *(__u32 *)buffer;
in_handshake = 0;
- syslog(LOG_INFO, "kernel module version: %d",
- kernel_modver);
+ syslog(LOG_INFO, "kernel module version: %u",
+ buffer.kernel_modver);
continue;
}
- in_msg = (struct hv_fcopy_hdr *)buffer;
-
- switch (in_msg->operation) {
+ switch (buffer.hdr.operation) {
case START_FILE_COPY:
- error = hv_start_fcopy((struct hv_start_fcopy *)in_msg);
+ error = hv_start_fcopy(&buffer.start);
break;
case WRITE_TO_FILE:
- error = hv_copy_data((struct hv_do_fcopy *)in_msg);
+ error = hv_copy_data(&buffer.copy);
break;
case COMPLETE_FCOPY:
error = hv_copy_finished();
@@ -231,7 +233,7 @@ int main(int argc, char *argv[])
default:
syslog(LOG_ERR, "Unknown operation: %d",
- in_msg->operation);
+ buffer.hdr.operation);
}
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 88b20e007c05..eaa3bec273c8 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1136,7 +1136,7 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
int i = 0;
int j = 0;
char str[256];
- char sub_str[10];
+ char sub_str[13];
int offset = 0;
memset(addr, 0, sizeof(addr));
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 7ba54195934c..b2b4ebffab8c 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -21,6 +21,7 @@
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/ioctl.h>
+#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <mntent.h>
@@ -30,6 +31,7 @@
#include <ctype.h>
#include <errno.h>
#include <linux/fs.h>
+#include <linux/major.h>
#include <linux/hyperv.h>
#include <syslog.h>
#include <getopt.h>
@@ -70,6 +72,7 @@ static int vss_operate(int operation)
char match[] = "/dev/";
FILE *mounts;
struct mntent *ent;
+ struct stat sb;
char errdir[1024] = {0};
unsigned int cmd;
int error = 0, root_seen = 0, save_errno = 0;
@@ -92,6 +95,10 @@ static int vss_operate(int operation)
while ((ent = getmntent(mounts))) {
if (strncmp(ent->mnt_fsname, match, strlen(match)))
continue;
+ if (stat(ent->mnt_fsname, &sb) == -1)
+ continue;
+ if (S_ISBLK(sb.st_mode) && major(sb.st_rdev) == LOOP_MAJOR)
+ continue;
if (hasmntopt(ent, MNTOPT_RO) != NULL)
continue;
if (strcmp(ent->mnt_type, "vfat") == 0)
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index 7d410260661b..edfeaba95429 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -24,6 +24,12 @@
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
+
+/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_TS 11
+#define ETM4_CFG_BIT_RETSTK 12
static inline int coresight_get_trace_id(int cpu)
{
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 7ce3d1a25133..fbfc055d3f4d 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -266,6 +266,32 @@ static u64 cs_etm_get_config(struct auxtrace_record *itr)
return config;
}
+#ifndef BIT
+#define BIT(N) (1UL << (N))
+#endif
+
+static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
+{
+ u64 config = 0;
+ u64 config_opts = 0;
+
+ /*
+ * The perf event variable config bits represent both
+ * the command line options and register programming
+ * bits in ETMv3/PTM. For ETMv4 we must remap options
+ * to real bits
+ */
+ config_opts = cs_etm_get_config(itr);
+ if (config_opts & BIT(ETM_OPT_CYCACC))
+ config |= BIT(ETM4_CFG_BIT_CYCACC);
+ if (config_opts & BIT(ETM_OPT_TS))
+ config |= BIT(ETM4_CFG_BIT_TS);
+ if (config_opts & BIT(ETM_OPT_RETSTK))
+ config |= BIT(ETM4_CFG_BIT_RETSTK);
+
+ return config;
+}
+
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
@@ -363,7 +389,7 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
magic = __perf_cs_etmv4_magic;
/* Get trace configuration register */
info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
- cs_etm_get_config(itr);
+ cs_etmv4_get_config(itr);
/* Get traceID from the framework */
info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
coresight_get_trace_id(cpu);